use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::{genesis_block, ChainHash};
+use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{LockTime, secp256k1, Sequence};
+use crate::blinded_path::BlindedPath;
+use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
// construct one themselves.
use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
-use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
+use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
#[cfg(test)]
use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
+use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
use crate::ln::wire::Encode;
+use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, InvoiceBuilder};
+use crate::offers::invoice_error::InvoiceError;
+use crate::offers::merkle::SignError;
+use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
+use crate::offers::parse::Bolt12SemanticError;
+use crate::offers::refund::{Refund, RefundBuilder};
+use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage};
use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;
-use alloc::collections::BTreeMap;
+use alloc::collections::{btree_map, BTreeMap};
use crate::io;
use crate::prelude::*;
}
#[inline]
fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+ let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
+ let action = if let (Some(_), ..) = &shutdown_res {
+ // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
+ // should disconnect our peer such that we force them to broadcast their latest
+ // commitment upon reconnecting.
+ msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
+ } else {
+ msgs::ErrorAction::SendErrorMessage { msg: err_msg }
+ };
Self {
- err: LightningError {
- err: err.clone(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage {
- channel_id,
- data: err
- },
- },
- },
+ err: LightningError { err, action },
chan_id: Some((channel_id, user_channel_id)),
shutdown_finish: Some((shutdown_res, channel_update)),
channel_capacity: Some(channel_capacity)
/// usually because we're running pre-full-init. They are handled immediately once we detect we are
/// running normally, and specifically must be processed before any other non-background
/// [`ChannelMonitorUpdate`]s are applied.
+#[derive(Debug)]
enum BackgroundEvent {
/// Handle a ChannelMonitorUpdate which closes the channel or for an already-closed channel.
/// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as the
event: events::Event,
downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
},
+ /// Indicates we should immediately resume the operation of another channel, unless there is
+ /// some other reason why the channel is blocked. In practice this simply means immediately
+ /// removing the [`RAAMonitorUpdateBlockingAction`] provided from the blocking set.
+ ///
+ /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
+ /// from completing a monitor update which removes the payment preimage until the inbound edge
+ /// completes a monitor update containing the payment preimage. However, we use this variant
+ /// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in
+ /// fact duplicative and we simply want to resume the outbound edge channel immediately.
+ ///
+ /// This variant should thus never be written to disk, as it is processed inline rather than
+ /// stored for later processing.
+ FreeOtherChannelImmediately {
+ downstream_counterparty_node_id: PublicKey,
+ downstream_funding_outpoint: OutPoint,
+ blocking_action: RAAMonitorUpdateBlockingAction,
+ },
}
impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
(0, PaymentClaimed) => { (0, payment_hash, required) },
+ // Note that FreeOtherChannelImmediately should never be written - we were supposed to free
+ // *immediately*. However, for simplicity we implement read/write here.
+ (1, FreeOtherChannelImmediately) => {
+ (0, downstream_counterparty_node_id, required),
+ (2, downstream_funding_outpoint, required),
+ (4, blocking_action, required),
+ },
(2, EmitEventAndFreeOtherChannel) => {
(0, event, upgradable_required),
// LDK prior to 0.0.116 did not have this field as the monitor update application order was
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
- Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+ Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>>,
&'e DefaultRouter<
&'f NetworkGraph<&'g L>,
&'g L,
- &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
+ &'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
>,
>;
/// A trivial trait which describes any [`ChannelManager`].
+///
+/// This is not exported to bindings users as general cover traits aren't useful in other
+/// languages.
pub trait AChannelManager {
/// A type implementing [`chain::Watch`].
type Watch: chain::Watch<Self::Signer> + ?Sized;
//
// Lock order tree:
//
+// `pending_offers_messages`
+//
// `total_consistency_lock`
// |
// |__`forward_htlcs`
// | |__`pending_intercepted_htlcs`
// |
// |__`per_peer_state`
-// | |
-// | |__`pending_inbound_payments`
-// | |
-// | |__`claimable_payments`
-// | |
-// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
-// | |
-// | |__`peer_state`
-// | |
-// | |__`id_to_peer`
-// | |
-// | |__`short_to_chan_info`
-// | |
-// | |__`outbound_scid_aliases`
-// | |
-// | |__`best_block`
-// | |
-// | |__`pending_events`
-// | |
-// | |__`pending_background_events`
+// |
+// |__`pending_inbound_payments`
+// |
+// |__`claimable_payments`
+// |
+// |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
+// |
+// |__`peer_state`
+// |
+// |__`id_to_peer`
+// |
+// |__`short_to_chan_info`
+// |
+// |__`outbound_scid_aliases`
+// |
+// |__`best_block`
+// |
+// |__`pending_events`
+// |
+// |__`pending_background_events`
//
pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
where
L::Target: Logger,
{
default_configuration: UserConfig,
- genesis_hash: BlockHash,
+ chain_hash: ChainHash,
fee_estimator: LowerBoundedFeeEstimator<F>,
chain_monitor: M,
tx_broadcaster: T,
/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
/// Notifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
+ /// Tracks the progress of channels going through batch funding by whether funding_signed was
+ /// received and the monitor has been persisted.
+ ///
+ /// This information does not need to be persisted as funding nodes can forget
+ /// unfunded channels upon disconnection.
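+ ///
+ /// Each entry in a transaction's `Vec` is `(channel_id, counterparty_node_id, completed)`,
+ /// where `completed` indicates that we have received `funding_signed` and persisted the
+ /// channel's initial `ChannelMonitor`.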
+ funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
background_events_processed_since_startup: AtomicBool,
event_persist_notifier: Notifier,
needs_persist_flag: AtomicBool,
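+ /// [`OffersMessage`]s, e.g. invoice requests and invoices, queued for delivery to peers via
+ /// onion message through the [`OffersMessageHandler`] implementation.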
+ pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
+
entropy_source: ES,
node_signer: NS,
signer_provider: SP,
}
/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
-///
-/// Balances of a channel are available through [`ChainMonitor::get_claimable_balances`] and
-/// [`ChannelMonitor::get_claimable_balances`], calculated with respect to the corresponding on-chain
-/// transactions.
-///
-/// [`ChainMonitor::get_claimable_balances`]: crate::chain::chainmonitor::ChainMonitor::get_claimable_balances
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
///
/// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
pub feerate_sat_per_1000_weight: Option<u32>,
+ /// Our total balance. This is the amount we would get if we close the channel.
+ /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
+ /// amount is not likely to be recoverable on close.
+ ///
+ /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
+ /// balance is not available for inclusion in new outbound HTLCs). This further does not include
+ /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
+ /// This does not consider any on-chain fees.
+ ///
+ /// See also [`ChannelDetails::outbound_capacity_msat`]
+ pub balance_msat: u64,
/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new outbound HTLCs). This further does not include any pending
/// outgoing HTLCs which are awaiting some other resolution to be sent.
///
+ /// See also [`ChannelDetails::balance_msat`]
+ ///
/// This value is not exact. Due to various in-flight changes, feerate changes, and our
/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
/// should be able to spend nearly this amount.
/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
/// to use a limit as close as possible to the HTLC limit we can currently send.
///
- /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`] and
- /// [`ChannelDetails::outbound_capacity_msat`].
+ /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+ /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
pub next_outbound_htlc_limit_msat: u64,
/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
channel_value_satoshis: context.get_value_satoshis(),
feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()),
unspendable_punishment_reserve: to_self_reserve_satoshis,
+ balance_msat: balance.balance_msat,
inbound_capacity_msat: balance.inbound_capacity_msat,
outbound_capacity_msat: balance.outbound_capacity_msat,
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
let mut msg_events = Vec::with_capacity(2);
if let Some((shutdown_res, update_option)) = shutdown_finish {
- $self.finish_force_close_channel(shutdown_res);
+ $self.finish_close_channel(shutdown_res);
if let Some(update) = update_option {
msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
macro_rules! handle_monitor_update_completion {
($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let mut updates = $chan.monitor_updating_restored(&$self.logger,
- &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+ &$self.node_signer, $self.chain_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height());
let counterparty_node_id = $chan.context.get_counterparty_node_id();
let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
}
let channel_id = $chan.context.channel_id();
+ let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);
+ // If the channel belongs to a batch funding transaction, the progress of the batch
+ // should be updated as we have received funding_signed and persisted the monitor.
+ if let Some(txid) = unbroadcasted_batch_funding_txid {
+ let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
+ let mut batch_completed = false;
+ if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
+ let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
+ *chan_id == channel_id &&
+ *pubkey == counterparty_node_id
+ ));
+ if let Some(channel_state) = channel_state {
+ channel_state.2 = true;
+ } else {
+ debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
+ }
+ batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
+ } else {
+ debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
+ }
+
+ // When all channels in a batched funding transaction have become ready, it is no longer
+ // necessary to track the batch's progress, and the state of its channels can be updated.
+ if batch_completed {
+ let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
+ let per_peer_state = $self.per_peer_state.read().unwrap();
+ let mut batch_funding_tx = None;
+ for (channel_id, counterparty_node_id, _) in removed_batch_state {
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+ batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
+ chan.set_batch_ready();
+ let mut pending_events = $self.pending_events.lock().unwrap();
+ emit_channel_pending_event!(pending_events, chan);
+ }
+ }
+ }
+ if let Some(tx) = batch_funding_tx {
+ log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.txid());
+ $self.tx_broadcaster.broadcast_transactions(&[&tx]);
+ }
+ }
+ }
+
$self.handle_monitor_update_completion_actions(update_actions);
if let Some(forwards) = htlc_forwards {
}
macro_rules! handle_new_monitor_update {
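+ // Note that the `$chan` passed to each arm must be a funded `Channel`; callers holding a
+ // `ChannelPhase` must match out the `ChannelPhase::Funded` variant before invoking this
+ // macro, as unfunded channels have no monitors to update.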
- ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
- // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
- // any case so that it won't deadlock.
- debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
+ ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
match $update_res {
+ ChannelMonitorUpdateStatus::UnrecoverableError => {
+ let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+ log_error!($self.logger, "{}", err_str);
+ panic!("{}", err_str);
+ },
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
&$chan.context.channel_id());
},
}
} };
- ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
- handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
- $per_peer_state_lock, $chan, _internal, $remove,
+ ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
+ handle_new_monitor_update!($self, $update_res, $chan, _internal,
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
};
- ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
- if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
- handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
- $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
- } else {
- // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
- // update). Throwing away a monitor update could be dangerous, so we assert even in
- // release builds.
- panic!("Initial Monitors should not exist for non-funded channels");
- }
- };
- ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+ ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
.or_insert_with(Vec::new);
// During startup, we push monitor updates as background events through to here in
in_flight_updates.len() - 1
});
let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]);
- handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
- $per_peer_state_lock, $chan, _internal, $remove,
+ handle_new_monitor_update!($self, update_res, $chan, _internal,
{
let _ = in_flight_updates.remove(idx);
if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
}
})
} };
- ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
- if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
- handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
- $per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
- } else {
- // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
- // update). Throwing away a monitor update could be dangerous, so we assert even in
- // release builds.
- panic!("Monitor updates should not exist for non-funded channels");
- }
- }
}
macro_rules! process_events_body {
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
ChannelManager {
default_configuration: config.clone(),
- genesis_hash: genesis_block(params.network).header.block_hash(),
+ chain_hash: ChainHash::using_genesis_block(params.network),
fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
chain_monitor,
tx_broadcaster,
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
-
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
+ funding_batch_states: Mutex::new(BTreeMap::new()),
+
+ pending_offers_messages: Mutex::new(Vec::new()),
entropy_source,
node_signer,
if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
outbound_scid_alias += 1;
} else {
- outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+ outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
}
if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
break;
},
}
};
- let res = channel.get_open_channel(self.genesis_hash.clone());
+ let res = channel.get_open_channel(self.chain_hash);
let temporary_channel_id = channel.context.channel_id();
match peer_state.channel_by_id.entry(temporary_channel_id) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let mut shutdown_result = None;
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let funding_txo_opt = chan.context.get_funding_txo();
let their_features = &peer_state.latest_features;
+ let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
let (shutdown_msg, mut monitor_update_opt, htlcs) =
chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() {
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+ peer_state_lock, peer_state, per_peer_state, chan);
break;
}
});
}
self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
+ shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
}
}
break;
// it does not exist for this peer. Either way, we can attempt to force-close it.
//
// An appropriate error will be returned for non-existence of the channel if that's the case.
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
},
}
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
+ if let Some(shutdown_result) = shutdown_result {
+ self.finish_close_channel(shutdown_result);
+ }
+
Ok(())
}
self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
- #[inline]
- fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
- let (monitor_update_option, mut failed_htlcs) = shutdown_res;
+ fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
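+ // Tearing down a funding batch below can take the `per_peer_state` read lock and individual
+ // peer locks, so assert that the caller holds none of them to avoid deadlocks.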
+ debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+ #[cfg(debug_assertions)]
+ for (_, peer) in self.per_peer_state.read().unwrap().iter() {
+ debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
+ }
+
+ let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
// ignore the result here.
let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
}
+ let mut shutdown_results = Vec::new();
+ if let Some(txid) = unbroadcasted_batch_funding_txid {
+ let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
+ let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let mut has_uncompleted_channel = None;
+ for (channel_id, counterparty_node_id, state) in affected_channels {
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
+ update_maps_on_chan_removal!(self, &chan.context());
+ self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
+ shutdown_results.push(chan.context_mut().force_shutdown(false));
+ }
+ }
+ has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
+ }
+ debug_assert!(
+ has_uncompleted_channel.unwrap_or(true),
+ "Closing a batch where all channels have completed initial monitor update",
+ );
+ }
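+ // Close any sibling channels from the same batch recursively. The batch state for `txid` was
+ // removed above, so the nested calls find no batch entry and recursion stops after one level.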
+ for shutdown_result in shutdown_results.drain(..) {
+ self.finish_close_channel(shutdown_result);
+ }
}
/// `peer_msg` should be set when we receive a message from a peer, but not set when the
let peer_state_mutex = per_peer_state.get(peer_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
let (update_opt, counterparty_node_id) = {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
+ let mut peer_state = peer_state_mutex.lock().unwrap();
let closure_reason = if let Some(peer_msg) = peer_msg {
ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
} else {
log_error!(self.logger, "Force-closing channel {}", channel_id);
self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+ mem::drop(peer_state);
+ mem::drop(per_peer_state);
match chan_phase {
ChannelPhase::Funded(mut chan) => {
- self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+ self.finish_close_channel(chan.context.force_shutdown(broadcast));
(self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
},
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
- self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false));
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
}
};
if let Some(update) = update_opt {
- let mut peer_state = peer_state_mutex.lock().unwrap();
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
+ // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
+ // not try to broadcast it via whatever peer we have.
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let a_peer_state_mutex_opt = per_peer_state.get(peer_node_id)
+ .or_else(|| per_peer_state.values().next());
+ if let Some(a_peer_state_mutex) = a_peer_state_mutex_opt {
+ let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
+ a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
}
Ok(counterparty_node_id)
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+ action: msgs::ErrorAction::DisconnectPeer {
+ msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
},
}
);
// payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
// channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
let current_height: u32 = self.best_block.read().unwrap().height();
- if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+ if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
let mut err_data = Vec::with_capacity(12);
err_data.extend_from_slice(&amt_msat.to_be_bytes());
err_data.extend_from_slice(&current_height.to_be_bytes());
// Note that this is likely a timing oracle for detecting whether an scid is a
// phantom or an intercept.
if (self.default_configuration.accept_intercept_htlcs &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
- fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+ fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
+ fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
{
None
} else {
};
let unsigned = msgs::UnsignedChannelUpdate {
- chain_hash: self.genesis_hash,
+ chain_hash: self.chain_hash,
short_channel_id,
timestamp: chan.context.get_update_time_counter(),
flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
}, onion_packet, None, &self.fee_estimator, &self.logger);
match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
Some(monitor_update) => {
- match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
+ match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
false => {
// Note that MonitorUpdateInProgress here indicates (per function
// docs) that we will resend the commitment update once monitor
/// In general, a path may raise:
/// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
/// node public key) is specified.
- /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
- /// (including due to previous monitor update failure or new permanent monitor update
- /// failure).
+ /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available as it has been
+ /// closed, doesn't exist, or the peer is currently disconnected.
/// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
/// relevant updates.
///
self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
}
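+ /// Sends payment for the given [`Bolt12Invoice`], tracked under `payment_id`. Typically
+ /// called upon receiving the invoice for a payment previously initiated via
+ /// [`ChannelManager::pay_for_offer`].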
+ pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
+ let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.pending_outbound_payments
+ .send_payment_for_bolt12_invoice(
+ invoice, payment_id, &self.router, self.list_usable_channels(),
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
+ best_block_height, &self.logger, &self.pending_events,
+ |args| self.send_payment_along_path(args)
+ )
+ }
/// Signals that no further attempts for the given payment should occur. Useful if you have a
/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
///
/// # Requested Invoices
///
- /// In the case of paying a [`Bolt12Invoice`], abandoning the payment prior to receiving the
- /// invoice will result in an [`Event::InvoiceRequestFailed`] and prevent any attempts at paying
- /// it once received. The other events may only be generated once the invoice has been received.
+ /// In the case of paying a [`Bolt12Invoice`] via [`ChannelManager::pay_for_offer`], abandoning
+ /// the payment prior to receiving the invoice will result in an [`Event::InvoiceRequestFailed`]
+ /// and prevent any attempts at paying it once received. The other events may only be generated
+ /// once the invoice has been received.
///
/// # Restart Behavior
///
///
/// See [`ChannelManager::send_preflight_probes`] for more information.
pub fn send_spontaneous_preflight_probes(
- &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
+ &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
liquidity_limit_multiplier: Option<u64>,
) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
let payment_params =
PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
- let route_params = RouteParameters { payment_params, final_value_msat: amount_msat };
+ let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
self.send_preflight_probes(route_params, liquidity_limit_multiplier)
}
/// Handles the generation of a funding transaction, optionally (for tests) with a function
/// which checks the correctness of the funding transaction given the associated channel.
- fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
- &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+ fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
+ &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
+ mut find_funding_output: FundingOutput,
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
- let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
+ let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
#[cfg(test)]
pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
- self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
+ self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_, tx| {
Ok(OutPoint { txid: tx.txid(), index: output_index })
})
}
/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
+ self.batch_funding_transaction_generated(&[(temporary_channel_id, counterparty_node_id)], funding_transaction)
+ }
+
+ /// Call this upon creation of a batch funding transaction for the given channels.
+ ///
+ /// Return values are identical to [`Self::funding_transaction_generated`], respective to
+ /// each individual channel and transaction output.
+ ///
+ /// Do NOT broadcast the funding transaction yourself. This batch funding transaction
+ /// will only be broadcast when we have safely received and persisted the counterparty's
+ /// signature for each channel.
+ ///
+ /// If there is an error, all channels in the batch are to be considered closed.
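+ ///
+ /// A minimal usage sketch (the two channels and the wallet-side transaction construction are
+ /// assumed to exist; `build_funding_tx_paying` is a hypothetical helper):
+ ///
+ /// ```ignore
+ /// // One output per channel, paying the script from each FundingGenerationReady event.
+ /// let funding_tx = build_funding_tx_paying(&[(script_a, sats_a), (script_b, sats_b)]);
+ /// channel_manager.batch_funding_transaction_generated(
+ /// &[(&temp_chan_id_a, &node_id_a), (&temp_chan_id_b, &node_id_b)],
+ /// funding_tx,
+ /// )?;
+ /// // Do NOT broadcast funding_tx; it is broadcast automatically once every channel in the
+ /// // batch has received and persisted its counterparty's signature.
+ /// ```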
+ pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ let mut result = Ok(());
if !funding_transaction.is_coin_base() {
for inp in funding_transaction.input.iter() {
if inp.witness.is_empty() {
- return Err(APIError::APIMisuseError {
+ result = result.and(Err(APIError::APIMisuseError {
err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
- });
+ }));
}
}
}
+ if funding_transaction.output.len() > u16::max_value() as usize {
+ result = result.and(Err(APIError::APIMisuseError {
+ err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+ }));
+ }
{
let height = self.best_block.read().unwrap().height();
// Transactions are evaluated as final by network mempools if their locktime is strictly
// node might not have perfect sync about their blockchain views. Thus, if the wallet
// module is ahead of LDK, only allow one more block of headroom.
if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 {
- return Err(APIError::APIMisuseError {
+ result = result.and(Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
- });
+ }));
}
}
- self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
- if tx.output.len() > u16::max_value() as usize {
- return Err(APIError::APIMisuseError {
- err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
- });
- }
- let mut output_index = None;
- let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
- for (idx, outp) in tx.output.iter().enumerate() {
- if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
- if output_index.is_some() {
+ let txid = funding_transaction.txid();
+ let is_batch_funding = temporary_channels.len() > 1;
+ let mut funding_batch_states = if is_batch_funding {
+ Some(self.funding_batch_states.lock().unwrap())
+ } else {
+ None
+ };
+ let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
+ match states.entry(txid) {
+ btree_map::Entry::Occupied(_) => {
+ result = result.clone().and(Err(APIError::APIMisuseError {
+ err: "Batch funding transaction with the same txid already exists".to_owned()
+ }));
+ None
+ },
+ btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
+ }
+ });
+ for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() {
+ result = result.and_then(|_| self.funding_transaction_generated_intern(
+ temporary_channel_id,
+ counterparty_node_id,
+ funding_transaction.clone(),
+ is_batch_funding,
+ |chan, tx| {
+ let mut output_index = None;
+ let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
+ if output_index.is_some() {
+ return Err(APIError::APIMisuseError {
+ err: "Multiple outputs matched the expected script and value".to_owned()
+ });
+ }
+ output_index = Some(idx as u16);
+ }
+ }
+ if output_index.is_none() {
return Err(APIError::APIMisuseError {
- err: "Multiple outputs matched the expected script and value".to_owned()
+ err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
});
}
- output_index = Some(idx as u16);
+ let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
+ if let Some(funding_batch_state) = funding_batch_state.as_mut() {
+ funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+ }
+ Ok(outpoint)
+ })
+ );
+ }
+ if let Err(ref e) = result {
+ // Remaining channels need to be removed on any error.
+ let e = format!("Error in transaction funding: {:?}", e);
+ let mut channels_to_remove = Vec::new();
+ channels_to_remove.extend(funding_batch_states.as_mut()
+ .and_then(|states| states.remove(&txid))
+ .into_iter().flatten()
+ .map(|(chan_id, node_id, _state)| (chan_id, node_id))
+ );
+ channels_to_remove.extend(temporary_channels.iter()
+ .map(|(&chan_id, &node_id)| (chan_id, node_id))
+ );
+ let mut shutdown_results = Vec::new();
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ for (channel_id, counterparty_node_id) in channels_to_remove {
+ per_peer_state.get(&counterparty_node_id)
+ .map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
+ .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
+ .map(|mut chan| {
+ update_maps_on_chan_removal!(self, &chan.context());
+ self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
+ shutdown_results.push(chan.context_mut().force_shutdown(false));
+ });
}
}
- if output_index.is_none() {
- return Err(APIError::APIMisuseError {
- err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
- });
+ for shutdown_result in shutdown_results.drain(..) {
+ self.finish_close_channel(shutdown_result);
}
- Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
- })
+ }
+ result
}
/// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
for channel_id in channel_ids {
if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
- err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id),
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
});
};
}
next_hop_channel_id, next_node_id)
}),
None => return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}",
next_hop_channel_id, next_node_id)
})
}
}
if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
- if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
+ if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
let next_hop = match onion_utils::decode_next_payment_hop(
phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
}
}
}
- let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
- Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
+ let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
+ let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
+ Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
forwarding_channel_not_found!();
continue;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
hash_map::Entry::Occupied(mut chan_phase) => {
- updated_chan = true;
- handle_new_monitor_update!(self, funding_txo, update.clone(),
- peer_state_lock, peer_state, per_peer_state, chan_phase);
+ if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
+ updated_chan = true;
+ handle_new_monitor_update!(self, funding_txo, update.clone(),
+ peer_state_lock, peer_state, per_peer_state, chan);
+ } else {
+ debug_assert!(false, "We shouldn't have an update for a non-funded channel");
+ }
},
hash_map::Entry::Vacant(_) => {},
}
if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
- log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+ if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+ }
return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
/// with the current [`ChannelConfig`].
/// * Removing peers which have disconnected but no longer have any channels.
/// * Force-closing and removing channels which have not completed establishment in a timely manner.
+ /// * Forgetting about stale outbound payments, either those that have already been fulfilled
+ /// or those awaiting an invoice that hasn't been delivered in the necessary amount of time.
+ /// The latter is determined using the system clock in `std` and the highest seen block time
+ /// minus two hours in `no-std`.
///
/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
/// estimate fetches.
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
let mut pending_peers_awaiting_removal = Vec::new();
+ let mut shutdown_channels = Vec::new();
- let process_unfunded_channel_tick = |
+ let mut process_unfunded_channel_tick = |
chan_id: &ChannelId,
context: &mut ChannelContext<SP>,
unfunded_context: &mut UnfundedChannelContext,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
- self.finish_force_close_channel(context.force_shutdown(false));
+ shutdown_channels.push(context.force_shutdown(false));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
let _ = handle_error!(self, err, counterparty_node_id);
}
- self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
+ for shutdown_res in shutdown_channels {
+ self.finish_close_channel(shutdown_res);
+ }
+
+ #[cfg(feature = "std")]
+ let duration_since_epoch = std::time::SystemTime::now()
+ .duration_since(std::time::SystemTime::UNIX_EPOCH)
+ .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+ #[cfg(not(feature = "std"))]
+ let duration_since_epoch = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
+ );
+
+ self.pending_outbound_payments.remove_stale_payments(
+ duration_since_epoch, &self.pending_events
+ );
// Technically we don't need to do this here, but if we have holding cell entries in a
// channel that need freeing, it's better to do that here and block a background task
// This ensures that future code doesn't introduce a lock-order requirement for
// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
// this function with any `per_peer_state` peer lock acquired would.
+ #[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
for htlc in sources.drain(..) {
if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
- |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
- {
+ |_, definitely_duplicate| {
+ debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
+ Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
+ }
+ ) {
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
}
}
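+ // The second argument to `completion_action` indicates whether the claim was definitely a
+ // duplicate of one already completed. In that case no monitor update follows, and any
+ // returned action must be a `FreeOtherChannelImmediately` (or `None`), which is processed
+ // inline rather than queued.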
- fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
+ fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
-> Result<(), (PublicKey, MsgHandleErrInternal)> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
// `BackgroundEvent`s.
let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
+ // As we may call handle_monitor_update_completion_actions in rather rare cases, check that
+ // the required mutexes are not held before we start.
+ debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
+
{
let per_peer_state = self.per_peer_state.read().unwrap();
let chan_id = prev_hop.outpoint.to_channel_id();
let counterparty_node_id = chan.context.get_counterparty_node_id();
let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
- if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
- if let Some(action) = completion_action(Some(htlc_value_msat)) {
- log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
- chan_id, action);
- peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ match fulfill_res {
+ UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
+ if let Some(action) = completion_action(Some(htlc_value_msat), false) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ chan_id, action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ }
+ if !during_init {
+ handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+ peer_state, per_peer_state, chan);
+ } else {
+ // If we're running during init we cannot update a monitor directly -
+ // they probably haven't actually been loaded yet. Instead, push the
+ // monitor update as a background event.
+ self.pending_background_events.lock().unwrap().push(
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id,
+ funding_txo: prev_hop.outpoint,
+ update: monitor_update.clone(),
+ });
+ }
}
- if !during_init {
- handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
- peer_state, per_peer_state, chan_phase_entry);
- } else {
- // If we're running during init we cannot update a monitor directly -
- // they probably haven't actually been loaded yet. Instead, push the
- // monitor update as a background event.
- self.pending_background_events.lock().unwrap().push(
- BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id,
- funding_txo: prev_hop.outpoint,
- update: monitor_update.clone(),
- });
+ UpdateFulfillCommitFetch::DuplicateClaim {} => {
+ let action = if let Some(action) = completion_action(None, true) {
+ action
+ } else {
+ return Ok(());
+ };
+ mem::drop(peer_state_lock);
+
+ log_trace!(self.logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
+ chan_id, action);
+ let (node_id, funding_outpoint, blocker) =
+ if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
+ downstream_counterparty_node_id: node_id,
+ downstream_funding_outpoint: funding_outpoint,
+ blocking_action: blocker,
+ } = action {
+ (node_id, funding_outpoint, blocker)
+ } else {
+ debug_assert!(false,
+ "Duplicate claims should always free another channel immediately");
+ return Ok(());
+ };
+ if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
+ let mut peer_state = peer_state_mtx.lock().unwrap();
+ if let Some(blockers) = peer_state
+ .actions_blocking_raa_monitor_updates
+ .get_mut(&funding_outpoint.to_channel_id())
+ {
+ let mut found_blocker = false;
+ blockers.retain(|iter| {
+ // Note that we could actually be blocked, in
+ // which case we need to only remove the one
+ // blocker which was added duplicatively.
+ let first_blocker = !found_blocker;
+ if *iter == blocker { found_blocker = true; }
+ *iter != blocker || !first_blocker
+ });
+ debug_assert!(found_blocker);
+ }
+ } else {
+ debug_assert!(false);
+ }
}
}
}
// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
// generally always allowed to be duplicative (and it's specifically noted in
// `PaymentForwarded`).
- self.handle_monitor_update_completion_actions(completion_action(None));
+ self.handle_monitor_update_completion_actions(completion_action(None, false));
Ok(())
}
}
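+ // `startup_replay` is set when replaying payment claims from `ChannelMonitor` data on
+ // startup. Duplicate claims seen then may correspond to monitor updates still being replayed
+ // as background events, in which case we must not free the downstream channel immediately
+ // (see the `debug_assertions` block below).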
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
- forwarded_htlc_value_msat: Option<u64>, from_onchain: bool,
+ forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
) {
match source {
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
+ #[cfg(debug_assertions)]
+ let claiming_chan_funding_outpoint = hop_data.outpoint;
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
- |htlc_claim_value_msat| {
- if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
- let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
- Some(claimed_htlc_value - forwarded_htlc_value)
- } else { None };
+ |htlc_claim_value_msat, definitely_duplicate| {
+ let chan_to_release =
+ if let Some(node_id) = next_channel_counterparty_node_id {
+ Some((node_id, next_channel_outpoint, completed_blocker))
+ } else {
+ // We can only get `None` here if we are processing a
+ // `ChannelMonitor`-originated event, in which case we
+ // don't care about ensuring we wake the downstream
+ // channel's monitor updating - the channel is already
+ // closed.
+ None
+ };
+ if definitely_duplicate && startup_replay {
+ // On startup we may get redundant claims which are related to
+ // monitor updates still in flight. In that case, we shouldn't
+ // immediately free, but instead let that monitor update complete
+ // in the background.
+ #[cfg(debug_assertions)] {
+ let background_events = self.pending_background_events.lock().unwrap();
+ // There should be a `BackgroundEvent` pending...
+ assert!(background_events.iter().any(|ev| {
+ match ev {
+ // to apply a monitor update that blocked the claiming channel,
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ funding_txo, update, ..
+ } => {
+ if *funding_txo == claiming_chan_funding_outpoint {
+ assert!(update.updates.iter().any(|upd|
+ if let ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: update_preimage
+ } = upd {
+ payment_preimage == *update_preimage
+ } else { false }
+ ), "{:?}", update);
+ true
+ } else { false }
+ },
+ // or the channel we'd unblock is already closed,
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
+ (funding_txo, monitor_update)
+ ) => {
+ if *funding_txo == next_channel_outpoint {
+ assert_eq!(monitor_update.updates.len(), 1);
+ assert!(matches!(
+ monitor_update.updates[0],
+ ChannelMonitorUpdateStep::ChannelForceClosed { .. }
+ ));
+ true
+ } else { false }
+ },
+ // or the monitor update has completed and will unblock
+ // immediately once we get going.
+ BackgroundEvent::MonitorUpdatesComplete {
+ channel_id, ..
+ } =>
+ *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+ }
+ }), "{:?}", *background_events);
+ }
+ None
+ } else if definitely_duplicate {
+ if let Some(other_chan) = chan_to_release {
+ Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
+ downstream_counterparty_node_id: other_chan.0,
+ downstream_funding_outpoint: other_chan.1,
+ blocking_action: other_chan.2,
+ })
+ } else { None }
+ } else {
+ let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+ Some(claimed_htlc_value - forwarded_htlc_value)
+ } else { None }
+ } else { None };
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
fee_earned_msat,
next_channel_id: Some(next_channel_outpoint.to_channel_id()),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
- downstream_counterparty_and_funding_outpoint:
- if let Some(node_id) = next_channel_counterparty_node_id {
- Some((node_id, next_channel_outpoint, completed_blocker))
- } else {
- // We can only get `None` here if we are processing a
- // `ChannelMonitor`-originated event, in which case we
- // don't care about ensuring we wake the downstream
- // channel's monitor updating - the channel is already
- // closed.
- None
- },
+ downstream_counterparty_and_funding_outpoint: chan_to_release,
})
- } else { None }
+ }
});
if let Err((pk, err)) = res {
let result: Result<(), _> = Err(err);
}
fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
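+ // Completion actions can lock `claimable_payments`, individual peer states, and
+ // `pending_events`, so per the lock order tree the caller must not hold any of them.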
+ debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+
for action in actions.into_iter() {
match action {
MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
}
},
+ MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
+ downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+ } => {
+ self.handle_monitor_update_release(
+ downstream_counterparty_node_id,
+ downstream_funding_outpoint,
+ Some(blocking_action),
+ );
+ },
}
}
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
// likely to be lost on restart!
- if msg.chain_hash != self.genesis_hash {
+ if msg.chain_hash != self.chain_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
// accepted payment from yet. We do, however, need to wait to send our channel_ready
// until we have persisted our monitor.
- let new_channel_id = funding_msg.channel_id;
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
node_id: counterparty_node_id.clone(),
msg: funding_msg,
if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
- per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
- { peer_state.channel_by_id.remove(&new_channel_id) });
+ per_peer_state, chan, INITIAL_MONITOR);
} else {
unreachable!("This must be a funded channel as we just inserted it.");
}
let monitor = try_chan_phase_entry!(self,
chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
- handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+ handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
Ok(())
} else {
try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
- self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+ self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
if let Some(announcement_sigs) = announcement_sigs_opt {
log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
}
fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
- let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
+ let mut finish_shutdown = None;
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+ peer_state_lock, peer_state, per_peer_state, chan);
}
},
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
- self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
- return Ok(());
+ finish_shutdown = Some(chan.context_mut().force_shutdown(false));
},
}
} else {
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
+ if let Some(shutdown_res) = finish_shutdown {
+ self.finish_close_channel(shutdown_res);
+ }
Ok(())
}
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
+ let mut shutdown_result = None;
+ let unbroadcasted_batch_funding_txid;
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
if let Some(msg) = closing_signed {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
});
}
self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
+ shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
+ }
+ mem::drop(per_peer_state);
+ if let Some(shutdown_result) = shutdown_result {
+ self.finish_close_channel(shutdown_result);
}
Ok(())
}
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
+ log_trace!(self.logger,
+ "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
+ msg.channel_id);
peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
.or_insert_with(Vec::new)
.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
Ok(())
}
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
- peer_state, per_peer_state, chan_phase_entry);
+ peer_state, per_peer_state, chan);
}
Ok(())
} else {
},
hash_map::Entry::Vacant(entry) => {
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash)
+ fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
{
let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
handle_new_monitor_update!(self, funding_txo, monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+ peer_state_lock, peer_state, per_peer_state, chan);
}
htlcs_to_fail
} else {
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_phase_entry!(self, chan.announcement_signatures(
- &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+ &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
msg, &self.default_configuration
), chan_phase_entry),
// Note that announcement_signatures fails if the channel cannot be announced,
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
- log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
- try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
+ log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
+ let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
+ // If nothing changed after applying their update, we don't need to bother
+ // persisting.
+ if !did_change {
+ return Ok(NotifyOption::SkipPersistNoEvents);
+ }
}
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+ msg.channel_id
+ )
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
// freed HTLCs to fail backwards. If in the future we no longer drop pending
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
- msg, &self.logger, &self.node_signer, self.genesis_hash,
+ msg, &self.logger, &self.node_signer, self.chain_hash,
&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
let mut channel_update = None;
if let Some(msg) = responses.shutdown_msg {
"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
}
},
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ hash_map::Entry::Vacant(_) => {
+ log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
+ log_bytes!(msg.channel_id.0));
+ // Unfortunately, lnd doesn't force close on errors
+ // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
+ // One of the few ways to get an lnd counterparty to force close is by
+ // replicating what they do when restoring static channel backups (SCBs). They
+ // send an invalid `ChannelReestablish` with `0` commitment numbers and an
+ // invalid `your_last_per_commitment_secret`.
+ //
+ // Since we received a `ChannelReestablish` for a channel that doesn't exist, we
+ // can assume it's likely the channel closed from our point of view, but it
+ // remains open on the counterparty's side. By sending this bogus
+ // `ChannelReestablish` message now as a response to theirs, we trigger them to
+ // force close, broadcasting their latest state. If the closing transaction from
+ // our point of view remains unconfirmed, it'll enter a race with the
+ // counterparty's to-be-broadcast latest commitment transaction.
+ peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
+ node_id: *counterparty_node_id,
+ msg: msgs::ChannelReestablish {
+ channel_id: msg.channel_id,
+ next_local_commitment_number: 0,
+ next_remote_commitment_number: 0,
+ your_last_per_commitment_secret: [1u8; 32],
+ my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
+ next_funding_txid: None,
+ },
+ });
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
+ counterparty_node_id), msg.channel_id)
+ )
+ }
}
};
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
+ self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
+ action: msgs::ErrorAction::DisconnectPeer {
+ msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
},
});
}
}
for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ self.finish_close_channel(failure);
}
has_pending_monitor_events
if let Some(monitor_update) = monitor_opt {
has_monitor_update = true;
- let channel_id: ChannelId = *channel_id;
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
- peer_state.channel_by_id.remove(&channel_id));
+ peer_state_lock, peer_state, per_peer_state, chan);
continue 'peer_loop;
}
}
fn maybe_generate_initial_closing_signed(&self) -> bool {
let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
let mut has_update = false;
+ let mut shutdown_results = Vec::new();
{
let per_peer_state = self.per_peer_state.read().unwrap();
peer_state.channel_by_id.retain(|channel_id, phase| {
match phase {
ChannelPhase::Funded(chan) => {
+ let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
Ok((msg_opt, tx_opt)) => {
if let Some(msg) = msg_opt {
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, &chan.context);
+ shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
false
} else { true }
},
let _ = handle_error!(self, err, counterparty_node_id);
}
+ for shutdown_result in shutdown_results.drain(..) {
+ self.finish_close_channel(shutdown_result);
+ }
+
has_update
}
counterparty_node_id, funding_txo, update
});
}
- self.finish_force_close_channel(failure);
+ self.finish_close_channel(failure);
+ }
+ }
+
+ /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
+ /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
+ /// not have an expiration unless otherwise set on the builder.
+ ///
+ /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
+ /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
+ /// the node must be announced. Otherwise, there is no way to find a path to the introduction
+ /// node in order to send the [`InvoiceRequest`].
+ ///
+ /// [`Offer`]: crate::offers::offer::Offer
+ /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
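+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of creating and encoding an offer; `channel_manager` is assumed to be an
+ /// already-initialized [`ChannelManager`], and the snippet is not compiled as a doc-test:
+ ///
+ /// ```ignore
+ /// // Illustrative only: setup and error handling are elided.
+ /// let offer = channel_manager
+ ///     .create_offer_builder("coffee".to_string())
+ ///     .amount_msats(10_000_000)
+ ///     .build()?;
+ /// // Offers are bech32-encoded via `Display`, e.g. for rendering as a QR code.
+ /// let encoded_offer = offer.to_string();
+ /// ```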
+ pub fn create_offer_builder(
+ &self, description: String
+ ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
+ let node_id = self.get_our_node_id();
+ let expanded_key = &self.inbound_payment_key;
+ let entropy = &*self.entropy_source;
+ let secp_ctx = &self.secp_ctx;
+ let path = self.create_one_hop_blinded_path();
+
+ OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
+ .chain_hash(self.chain_hash)
+ .path(path)
+ }
+
+ /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
+ /// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
+ ///
+ /// The builder will have the provided expiration set. Any changes to the expiration on the
+ /// returned builder will not be honored by [`ChannelManager`]. For `no-std`, the highest seen
+ /// block time minus two hours is used for the current time when determining if the refund has
+ /// expired.
+ ///
+ /// The provided `payment_id` is used to ensure that only one invoice is paid for the refund. To
+ /// revoke the refund, use [`ChannelManager::abandon_payment`] prior to receiving the invoice.
+ /// If an invoice isn't received before expiration, the payment will fail with an
+ /// [`Event::InvoiceRequestFailed`].
+ ///
+ /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
+ /// the introduction node and a derived payer id for sender privacy. As such, currently, the
+ /// node must be announced. Otherwise, there is no way to find a path to the introduction node
+ /// in order to send the [`Bolt12Invoice`].
+ ///
+ /// [`Refund`]: crate::offers::refund::Refund
+ /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
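+ ///
+ /// # Example
+ ///
+ /// A minimal sketch; `channel_manager` is assumed to be a configured [`ChannelManager`] and
+ /// `absolute_expiry` a `Duration` since the Unix epoch (not compiled as a doc-test):
+ ///
+ /// ```ignore
+ /// // Illustrative only: setup and error handling are elided.
+ /// let payment_id = PaymentId([42; 32]);
+ /// let refund = channel_manager
+ ///     .create_refund_builder(
+ ///         "refund for order 123".to_string(), 10_000_000, absolute_expiry, payment_id,
+ ///         Retry::Attempts(3), None,
+ ///     )?
+ ///     .build()?;
+ /// // The bech32-encoded refund can be given to the intended payee out-of-band.
+ /// let encoded_refund = refund.to_string();
+ /// ```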
+ pub fn create_refund_builder(
+ &self, description: String, amount_msats: u64, absolute_expiry: Duration,
+ payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
+ ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
+ let node_id = self.get_our_node_id();
+ let expanded_key = &self.inbound_payment_key;
+ let entropy = &*self.entropy_source;
+ let secp_ctx = &self.secp_ctx;
+ let path = self.create_one_hop_blinded_path();
+
+ let builder = RefundBuilder::deriving_payer_id(
+ description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
+ )?
+ .chain_hash(self.chain_hash)
+ .absolute_expiry(absolute_expiry)
+ .path(path);
+
+ let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
+ self.pending_outbound_payments
+ .add_new_awaiting_invoice(
+ payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
+ )
+ .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
+
+ Ok(builder)
+ }
+
+ /// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
+ /// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
+ /// [`Bolt12Invoice`] once it is received.
+ ///
+ /// Uses [`InvoiceRequestBuilder`] such that the [`InvoiceRequest`] it builds is recognized by
+ /// the [`ChannelManager`] when handling a [`Bolt12Invoice`] message in response to the request.
+ /// The optional parameters are used in the builder, if `Some`:
+ /// - `quantity` for [`InvoiceRequest::quantity`] which must be set if
+ /// [`Offer::expects_quantity`] is `true`.
+ /// - `amount_msats` if overpaying what is required for the given `quantity` is desired, and
+ /// - `payer_note` for [`InvoiceRequest::payer_note`].
+ ///
+ /// The provided `payment_id` is used to ensure that only one invoice is paid for the request
+ /// when received. See [Avoiding Duplicate Payments] for other requirements once the payment has
+ /// been sent.
+ ///
+ /// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the
+ /// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
+ /// payment will fail with an [`Event::InvoiceRequestFailed`].
+ ///
+ /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link.
+ ///
+ /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+ /// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
+ /// [`InvoiceRequest::payer_note`]: crate::offers::invoice_request::InvoiceRequest::payer_note
+ /// [`InvoiceRequestBuilder`]: crate::offers::invoice_request::InvoiceRequestBuilder
+ /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+ /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
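+ ///
+ /// # Example
+ ///
+ /// A sketch of paying an offer received out-of-band; `channel_manager` is assumed to be a
+ /// configured [`ChannelManager`] and `encoded_offer` a bech32 offer string (not compiled as
+ /// a doc-test):
+ ///
+ /// ```ignore
+ /// // Illustrative only: setup and error handling are elided.
+ /// let offer: Offer = encoded_offer.parse()?;
+ /// let payment_id = PaymentId([0; 32]);
+ /// channel_manager.pay_for_offer(
+ ///     &offer, None, None, None, payment_id, Retry::Attempts(3), None,
+ /// )?;
+ /// // Any `Bolt12Invoice` received in response is then paid automatically.
+ /// ```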
+ pub fn pay_for_offer(
+ &self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
+ payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
+ max_total_routing_fee_msat: Option<u64>
+ ) -> Result<(), Bolt12SemanticError> {
+ let expanded_key = &self.inbound_payment_key;
+ let entropy = &*self.entropy_source;
+ let secp_ctx = &self.secp_ctx;
+
+ let builder = offer
+ .request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)?
+ .chain_hash(self.chain_hash)?;
+ let builder = match quantity {
+ None => builder,
+ Some(quantity) => builder.quantity(quantity)?,
+ };
+ let builder = match amount_msats {
+ None => builder,
+ Some(amount_msats) => builder.amount_msats(amount_msats)?,
+ };
+ let builder = match payer_note {
+ None => builder,
+ Some(payer_note) => builder.payer_note(payer_note),
+ };
+
+ let invoice_request = builder.build_and_sign()?;
+ let reply_path = self.create_one_hop_blinded_path();
+
+ let expiration = StaleExpiration::TimerTicks(1);
+ self.pending_outbound_payments
+ .add_new_awaiting_invoice(
+ payment_id, expiration, retry_strategy, max_total_routing_fee_msat
+ )
+ .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
+
+ let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
+ if offer.paths().is_empty() {
+ let message = PendingOnionMessage {
+ contents: OffersMessage::InvoiceRequest(invoice_request),
+ destination: Destination::Node(offer.signing_pubkey()),
+ reply_path: Some(reply_path),
+ };
+ pending_offers_messages.push(message);
+ } else {
+ // Send as many invoice requests as there are paths in the offer (with an upper bound).
+ // Using only one path could result in a failure if the path no longer exists. But only
+ // one invoice for a given payment id will be paid, even if more than one is received.
+ const REQUEST_LIMIT: usize = 10;
+ for path in offer.paths().into_iter().take(REQUEST_LIMIT) {
+ let message = PendingOnionMessage {
+ contents: OffersMessage::InvoiceRequest(invoice_request.clone()),
+ destination: Destination::BlindedPath(path.clone()),
+ reply_path: Some(reply_path.clone()),
+ };
+ pending_offers_messages.push(message);
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Creates a [`Bolt12Invoice`] for a [`Refund`] and enqueues it to be sent via an onion
+ /// message.
+ ///
+ /// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
+ /// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding
+ /// [`PaymentPreimage`].
+ ///
+ /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
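+ ///
+ /// # Example
+ ///
+ /// A short sketch; `channel_manager` is assumed to be a configured [`ChannelManager`] and
+ /// `encoded_refund` a bech32 refund string received out-of-band (not compiled as a doc-test):
+ ///
+ /// ```ignore
+ /// // Illustrative only: setup and error handling are elided.
+ /// let refund: Refund = encoded_refund.parse()?;
+ /// // Enqueues a `Bolt12Invoice` to be sent back via onion message.
+ /// channel_manager.request_refund_payment(&refund)?;
+ /// ```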
+ pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
+ let expanded_key = &self.inbound_payment_key;
+ let entropy = &*self.entropy_source;
+ let secp_ctx = &self.secp_ctx;
+
+ let amount_msats = refund.amount_msats();
+ let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+
+ match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
+ Ok((payment_hash, payment_secret)) => {
+ let payment_paths = vec![
+ self.create_one_hop_blinded_payment_path(payment_secret),
+ ];
+ #[cfg(not(feature = "no-std"))]
+ let builder = refund.respond_using_derived_keys(
+ payment_paths, payment_hash, expanded_key, entropy
+ )?;
+ #[cfg(feature = "no-std")]
+ let created_at = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
+ #[cfg(feature = "no-std")]
+ let builder = refund.respond_using_derived_keys_no_std(
+ payment_paths, payment_hash, created_at, expanded_key, entropy
+ )?;
+ let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
+ let reply_path = self.create_one_hop_blinded_path();
+
+ let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
+ if refund.paths().is_empty() {
+ let message = PendingOnionMessage {
+ contents: OffersMessage::Invoice(invoice),
+ destination: Destination::Node(refund.payer_id()),
+ reply_path: Some(reply_path),
+ };
+ pending_offers_messages.push(message);
+ } else {
+ for path in refund.paths() {
+ let message = PendingOnionMessage {
+ contents: OffersMessage::Invoice(invoice.clone()),
+ destination: Destination::BlindedPath(path.clone()),
+ reply_path: Some(reply_path.clone()),
+ };
+ pending_offers_messages.push(message);
+ }
+ }
+
+ Ok(())
+ },
+ Err(()) => Err(Bolt12SemanticError::InvalidAmount),
}
}
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
+ /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
+ /// node.
+ fn create_one_hop_blinded_path(&self) -> BlindedPath {
+ let entropy_source = self.entropy_source.deref();
+ let secp_ctx = &self.secp_ctx;
+ BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
+ }
+
+ /// Creates a one-hop blinded payment path with [`ChannelManager::get_our_node_id`] as the
+ /// introduction node.
+ fn create_one_hop_blinded_payment_path(
+ &self, payment_secret: PaymentSecret
+ ) -> (BlindedPayInfo, BlindedPath) {
+ let entropy_source = self.entropy_source.deref();
+ let secp_ctx = &self.secp_ctx;
+
+ let payee_node_id = self.get_our_node_id();
+ let max_cltv_expiry = self.best_block.read().unwrap().height() + LATENCY_GRACE_PERIOD_BLOCKS;
+ let payee_tlvs = ReceiveTlvs {
+ payment_secret,
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry,
+ htlc_minimum_msat: 1,
+ },
+ };
+ // TODO: Err for overflow?
+ BlindedPath::one_hop_for_payment(
+ payee_node_id, payee_tlvs, entropy_source, secp_ctx
+ ).unwrap()
+ }
+
/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
/// are used when constructing the phantom invoice's route hints.
///
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+ let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
// Ensure the generated scid doesn't conflict with a real channel.
match short_to_chan_info.get(&scid_candidate) {
Some(_) => continue,
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+ let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
// Ensure the generated scid doesn't conflict with a real channel.
if short_to_chan_info.contains_key(&scid_candidate) { continue }
return scid_candidate
/// operation. It will double-check that nothing *else* is also blocking the same channel from
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
- let mut errors = Vec::new();
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
channel_funding_outpoint.to_channel_id());
handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
- peer_state_lck, peer_state, per_peer_state, chan_phase_entry);
+ peer_state_lck, peer_state, per_peer_state, chan);
if further_update_exists {
// If there are more `ChannelMonitorUpdate`s to process, restart at the
// top of the loop.
}
break;
}
- for (err, counterparty_node_id) in errors {
- let res = Err::<(), _>(err);
- let _ = handle_error!(self, res, counterparty_node_id);
- }
}
fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
}
}
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
- self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
.map(|(a, b)| (a, Vec::new(), b)));
let last_best_block_height = self.best_block.read().unwrap().height();
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
- self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
}
}
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
macro_rules! max_time {
($timestamp: expr) => {
msg: announcement_sigs,
});
if let Some(height) = height_opt {
- if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
+ if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.chain_hash, height, &self.default_configuration) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: announcement,
// Note that announcement_signatures fails if the channel cannot be announced,
self.issue_channel_close_events(&channel.context, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
- channel_id: channel.context.channel_id(),
- data: reason_message,
- } },
+ action: msgs::ErrorAction::DisconnectPeer {
+ msg: Some(msgs::ErrorMessage {
+ channel_id: channel.context.channel_id(),
+ data: reason_message,
+ })
+ },
});
return false;
}
self.best_block.read().unwrap().clone()
}
- /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
+ /// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub fn node_features(&self) -> NodeFeatures {
provided_node_features(&self.default_configuration)
}
- /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
+ /// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
///
/// Note that the invoice feature flags can vary depending on whether the invoice is a
/// "phantom invoice". Thus, this method is not public.
#[cfg(any(feature = "_test_utils", test))]
- pub fn invoice_features(&self) -> Bolt11InvoiceFeatures {
- provided_invoice_features(&self.default_configuration)
+ pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
+ provided_bolt11_invoice_features(&self.default_configuration)
}
- /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
+ /// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
+ /// [`ChannelManager`].
+ fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
+ provided_bolt12_invoice_features(&self.default_configuration)
+ }
+
+ /// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub fn channel_features(&self) -> ChannelFeatures {
provided_channel_features(&self.default_configuration)
}
- /// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by
+ /// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub fn channel_type_features(&self) -> ChannelTypeFeatures {
provided_channel_type_features(&self.default_configuration)
}
- /// Fetches the set of [`InitFeatures`] flags which are provided by or required by
+ /// Fetches the set of [`InitFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub fn init_features(&self) -> InitFeatures {
provided_init_features(&self.default_configuration)
fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
self, || NotifyOption::SkipPersistHandleEvents);
-
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
peer_state.channel_by_id.retain(|_, phase| {
let context = match phase {
ChannelPhase::Funded(chan) => {
- chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- // We only retain funded channels that are not shutdown.
- if !chan.is_shutdown() {
+ if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() {
+ // We only retain funded channels that are not shutdown.
return true;
}
- &chan.context
+ &mut chan.context
},
// Unfunded channels will always be removed.
ChannelPhase::UnfundedOutboundV1(chan) => {
- &chan.context
+ &mut chan.context
},
ChannelPhase::UnfundedInboundV1(chan) => {
- &chan.context
+ &mut chan.context
},
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
+ failed_channels.push(context.force_shutdown(false));
false
});
// Note that we don't bother generating any events for pre-accept channels -
mem::drop(per_peer_state);
for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ self.finish_close_channel(failure);
}
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: *counterparty_node_id,
msg,
provided_init_features(&self.default_configuration)
}
- fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
- Some(vec![ChainHash::from(&self.genesis_hash[..])])
+ fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
+ Some(vec![self.chain_hash])
}
fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
}
}
-/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
+ fn handle_message(&self, message: OffersMessage) -> Option<OffersMessage> {
+ let secp_ctx = &self.secp_ctx;
+ let expanded_key = &self.inbound_payment_key;
+
+ match message {
+ OffersMessage::InvoiceRequest(invoice_request) => {
+ let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
+ &invoice_request
+ ) {
+ Ok(amount_msats) => Some(amount_msats),
+ Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
+ };
+ let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
+ Ok(invoice_request) => invoice_request,
+ Err(()) => {
+ let error = Bolt12SemanticError::InvalidMetadata;
+ return Some(OffersMessage::InvoiceError(error.into()));
+ },
+ };
+ let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+
+ match self.create_inbound_payment(amount_msats, relative_expiry, None) {
+ Ok((payment_hash, payment_secret)) if invoice_request.keys.is_some() => {
+ let payment_paths = vec![
+ self.create_one_hop_blinded_payment_path(payment_secret),
+ ];
+ #[cfg(not(feature = "no-std"))]
+ let builder = invoice_request.respond_using_derived_keys(
+ payment_paths, payment_hash
+ );
+ #[cfg(feature = "no-std")]
+ let created_at = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
+ #[cfg(feature = "no-std")]
+ let builder = invoice_request.respond_using_derived_keys_no_std(
+ payment_paths, payment_hash, created_at
+ );
+ match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
+ Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
+ Err(error) => Some(OffersMessage::InvoiceError(error.into())),
+ }
+ },
+ Ok((payment_hash, payment_secret)) => {
+ let payment_paths = vec![
+ self.create_one_hop_blinded_payment_path(payment_secret),
+ ];
+ #[cfg(not(feature = "no-std"))]
+ let builder = invoice_request.respond_with(payment_paths, payment_hash);
+ #[cfg(feature = "no-std")]
+ let created_at = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
+ #[cfg(feature = "no-std")]
+ let builder = invoice_request.respond_with_no_std(
+ payment_paths, payment_hash, created_at
+ );
+ let response = builder.and_then(|builder| builder.allow_mpp().build())
+ .map_err(|e| OffersMessage::InvoiceError(e.into()))
+ .and_then(|invoice|
+ match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
+ Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
+ Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
+ InvoiceError::from_str("Failed signing invoice")
+ )),
+ Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
+ InvoiceError::from_str("Failed invoice signature verification")
+ )),
+ });
+ match response {
+ Ok(invoice) => Some(invoice),
+ Err(error) => Some(error),
+ }
+ },
+ Err(()) => {
+ Some(OffersMessage::InvoiceError(Bolt12SemanticError::InvalidAmount.into()))
+ },
+ }
+ },
+ OffersMessage::Invoice(invoice) => {
+ match invoice.verify(expanded_key, secp_ctx) {
+ Err(()) => {
+ Some(OffersMessage::InvoiceError(InvoiceError::from_str("Unrecognized invoice")))
+ },
+ Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
+ Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
+ },
+ Ok(payment_id) => {
+ if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
+ log_trace!(self.logger, "Failed paying invoice: {:?}", e);
+ Some(OffersMessage::InvoiceError(InvoiceError::from_str(&format!("{:?}", e))))
+ } else {
+ None
+ }
+ },
+ }
+ },
+ OffersMessage::InvoiceError(invoice_error) => {
+ log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
+ None
+ },
+ }
+ }
+
+ fn release_pending_messages(&self) -> Vec<PendingOnionMessage<OffersMessage>> {
+ core::mem::take(&mut self.pending_offers_messages.lock().unwrap())
+ }
+}
+
+/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
let mut node_features = provided_init_features(config).to_context();
node_features
}
-/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
+/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
///
/// Note that the invoice feature flags can vary depending on whether the invoice is a
/// "phantom invoice". Thus, this method is not public.
#[cfg(any(feature = "_test_utils", test))]
-pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
+pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
provided_init_features(config).to_context()
}
-/// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
+/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
+/// [`ChannelManager`].
+pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
+ provided_init_features(config).to_context()
+}
+
+/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
provided_init_features(config).to_context()
}
-/// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by
+/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
ChannelTypeFeatures::from_init(&provided_init_features(config))
}
-/// Fetches the set of [`InitFeatures`] flags which are provided by or required by
+/// Fetches the set of [`InitFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
// Note that if new features are added here which other peers may (eventually) require, we
(10, self.channel_value_satoshis, required),
(12, self.unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, self.next_outbound_htlc_limit_msat, required), // Forwards compatibility for removed balance_msat field.
+ (16, self.balance_msat, required),
(18, self.outbound_capacity_msat, required),
(19, self.next_outbound_htlc_limit_msat, required),
(20, self.inbound_capacity_msat, required),
(10, channel_value_satoshis, required),
(12, unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, _balance_msat, option), // Backwards compatibility for removed balance_msat field.
+ (16, balance_msat, required),
(18, outbound_capacity_msat, required),
// Note that by the time we get past the required read above, outbound_capacity_msat will be
// filled in, so we can safely unwrap it here.
let user_channel_id = user_channel_id_low as u128 +
((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
- let _balance_msat: Option<u64> = _balance_msat;
-
Ok(Self {
inbound_scid_alias,
channel_id: channel_id.0.unwrap(),
channel_value_satoshis: channel_value_satoshis.0.unwrap(),
unspendable_punishment_reserve,
user_channel_id,
+ balance_msat: balance_msat.0.unwrap(),
outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
- self.genesis_hash.write(writer)?;
+ self.chain_hash.write(writer)?;
{
let best_block = self.best_block.read().unwrap();
best_block.height().write(writer)?;
}
number_of_funded_channels += peer_state.channel_by_id.iter().filter(
- |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false }
+ |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
).count();
}
let peer_state = &mut *peer_state_lock;
for channel in peer_state.channel_by_id.iter().filter_map(
|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
- if channel.context.is_funding_initiated() { Some(channel) } else { None }
+ if channel.context.is_funding_broadcast() { Some(channel) } else { None }
} else { None }
) {
channel.write(writer)?;
fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
- let genesis_hash: BlockHash = Readable::read(reader)?;
+ let chain_hash: ChainHash = Readable::read(reader)?;
let best_block_height: u32 = Readable::read(reader)?;
let best_block_hash: BlockHash = Readable::read(reader)?;
log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
- let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
+ let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
+ if batch_funding_txid.is_some() {
+ return Err(DecodeError::InvalidValue);
+ }
if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id, funding_txo, update
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.context.is_funding_initiated() {
+ if channel.context.is_funding_broadcast() {
id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
}
match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
pending_fee_msat: Some(path_fee),
total_msat: path_amt,
starting_block_height: best_block_height,
+ remaining_max_total_routing_fee_msat: None, // only used for retries, and we'll never retry on startup
});
log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
path_amt, &htlc.payment_hash, log_bytes!(session_priv_bytes));
let mut outbound_scid_alias;
loop {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias
- .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
+ .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
}
chan.context.set_outbound_scid_alias(outbound_scid_alias);
Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
} = action {
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ log_trace!(args.logger,
+ "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
+ blocked_channel_outpoint.to_channel_id());
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
.entry(blocked_channel_outpoint.to_channel_id())
.or_insert_with(Vec::new).push(blocking_action.clone());
// anymore.
}
}
+ if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action {
+ debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
+ }
}
}
peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
}
let channel_manager = ChannelManager {
- genesis_hash,
+ chain_hash,
fee_estimator: bounded_fee_estimator,
chain_monitor: args.chain_monitor,
tx_broadcaster: args.tx_broadcaster,
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
+ funding_batch_states: Mutex::new(BTreeMap::new()),
+
+ pending_offers_messages: Mutex::new(Vec::new()),
+
entropy_source: args.entropy_source,
node_signer: args.node_signer,
signer_provider: args.signer_provider,
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, downstream_node_id, downstream_funding);
+ downstream_closed, true, downstream_node_id, downstream_funding);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
TEST_FINAL_CLTV, false), 100_000);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+ None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+ None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+ None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
let payment_id_2 = PaymentId([45; 32]);
nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &(), &random_seed_bytes
+ nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &(), &random_seed_bytes
+ nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
check_api_error_message(expected_message, res_err)
}
+ fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
+ let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
+ check_api_error_message(expected_message, res_err)
+ }
+
+ fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
+ let expected_message = "No such channel awaiting to be accepted.".to_string();
+ check_api_error_message(expected_message, res_err)
+ }
+
fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
match res_err {
Err(APIError::APIMisuseError { err }) => {
check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
}
+ #[test]
+ fn test_api_calls_with_unavailable_channel() {
+ // Tests that our API functions that expect a `counterparty_node_id` and a `channel_id`
+ // as input behave as expected if the `counterparty_node_id` is a known peer in the
+ // `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with
+ // the given `channel_id`.
+ let chanmon_cfg = create_chanmon_cfgs(2);
+ let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+ let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
+ let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+ let counterparty_node_id = nodes[1].node.get_our_node_id();
+
+ // Dummy values
+ let channel_id = ChannelId::from_bytes([4; 32]);
+
+ // Test the API functions.
+ check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
+
+ check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
+
+ check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
+ }
+
#[test]
fn test_connection_limiting() {
// Test that we limit un-channel'd peers and un-funded channels properly.
sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok());
}
+ #[test]
+ fn test_final_incorrect_cltv() {
+ let chanmon_cfg = create_chanmon_cfgs(1);
+ let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+ let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
+ let node = create_network(1, &node_cfg, &node_chanmgr);
+
+ let result = node[0].node.construct_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
+ amt_msat: 100,
+ outgoing_cltv_value: 22,
+ payment_metadata: None,
+ keysend_preimage: None,
+ payment_data: Some(msgs::FinalOnionHopData {
+ payment_secret: PaymentSecret([0; 32]), total_msat: 100,
+ }),
+ custom_tlvs: Vec::new(),
+ }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None);
+
+ // Should not return an error, as the failure condition from the spec
+ // (https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334)
+ // is not satisfied here.
+ assert!(result.is_ok());
+ }
+
#[test]
fn test_inbound_anchors_manual_acceptance() {
// Tests that we properly limit inbound channels when we have the manual-channel-acceptance
let payment_preimage = PaymentPreimage([42; 32]);
assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
}
+
+ #[test]
+ fn test_trigger_lnd_force_close() {
+ let chanmon_cfg = create_chanmon_cfgs(2);
+ let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+ let user_config = test_default_channel_config();
+ let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+ let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+ // Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+ check_closed_broadcast(&nodes[0], 1, true);
+ check_added_monitors(&nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ {
+ let txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ }
+
+ // Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
+ // such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
+ // their side.
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ let channel_reestablish = get_event_msg!(
+ nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
+ );
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &channel_reestablish);
+
+ // Alice should respond with an error since the channel isn't known, but a bogus
+ // `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
+ // close even if it was an lnd node.
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 2);
+ if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(msg.next_local_commitment_number, 0);
+ assert_eq!(msg.next_remote_commitment_number, 0);
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &msg);
+ } else { panic!() };
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_added_monitors(&nodes[1], 1);
+ let expected_close_reason = ClosureReason::ProcessingError {
+ err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
+ };
+ check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
+ {
+ let txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ }
+ }
}
#[cfg(ldk_bench)]
macro_rules! send_payment {
($node_a: expr, $node_b: expr) => {
let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
- .with_bolt11_features($node_b.invoice_features()).unwrap();
+ .with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
let mut payment_preimage = PaymentPreimage([0; 32]);
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;