//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::Header;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::ChainHash;
+use bitcoin::key::constants::SECRET_KEY_SIZE;
use bitcoin::network::constants::Network;
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
-use bitcoin::{LockTime, secp256k1, Sequence};
+use bitcoin::{secp256k1, Sequence};
use crate::blinded_path::BlindedPath;
use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::events;
use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
use crate::ln::msgs;
use crate::ln::onion_utils;
-use crate::ln::onion_utils::HTLCFailReason;
+use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
#[cfg(test)]
use crate::ln::outbound_payment;
use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage};
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
+use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
+use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
use crate::util::string::UntrustedString;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
-use crate::util::logger::{Level, Logger};
+use crate::util::logger::{Level, Logger, WithContext};
use crate::util::errors::APIError;
use alloc::collections::{btree_map, BTreeMap};
// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.
+/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) enum PendingHTLCRouting {
+pub enum PendingHTLCRouting {
+ /// An HTLC which should be forwarded on to another node.
Forward {
+ /// The onion which should be included in the forwarded HTLC, telling the next hop what to
+ /// do with the HTLC.
onion_packet: msgs::OnionPacket,
- /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
- /// generated using `get_fake_scid` from the scid_utils::fake_scid module.
+ /// The short channel ID of the channel which we were instructed to forward this HTLC to.
+ ///
+ /// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
+ /// to the receiving node, such as one returned from
+ /// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
+ /// Set if this HTLC is being forwarded within a blinded path.
+ blinded: Option<BlindedForward>,
},
+ /// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
+ ///
+ /// Note that at this point, we have not checked that the invoice being paid was actually
+ /// generated by us, but rather it's claiming to pay an invoice of ours.
Receive {
+ /// Information about the amount the sender intended to pay and (potential) proof that this
+	/// is a payment for an invoice we generated. This proof of payment is also used for
+ /// linking MPP parts of a larger payment.
payment_data: msgs::FinalOnionHopData,
+ /// Additional data which we (allegedly) instructed the sender to include in the onion.
+ ///
+ /// For HTLCs received by LDK, this will ultimately be exposed in
+ /// [`Event::PaymentClaimable::onion_fields`] as
+ /// [`RecipientOnionFields::payment_metadata`].
payment_metadata: Option<Vec<u8>>,
- incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+ /// CLTV expiry of the received HTLC.
+ ///
+ /// Used to track when we should expire pending HTLCs that go unclaimed.
+ incoming_cltv_expiry: u32,
+ /// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
+ /// provide the onion shared secret used to decrypt the next level of forwarding
+ /// instructions.
phantom_shared_secret: Option<[u8; 32]>,
- /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+ /// Custom TLVs which were set by the sender.
+ ///
+ /// For HTLCs received by LDK, this will ultimately be exposed in
+ /// [`Event::PaymentClaimable::onion_fields`] as
+ /// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
},
+	/// The onion indicates that this is a payment to us, with the preimage for claiming it
+	/// included, and that it is unrelated to any invoice we'd previously generated (aka a
+	/// "keysend" or "spontaneous" payment).
ReceiveKeysend {
- /// This was added in 0.0.116 and will break deserialization on downgrades.
+ /// Information about the amount the sender intended to pay and possibly a token to
+ /// associate MPP parts of a larger payment.
+ ///
+	/// This will only be filled in if receiving MPP keysend payments is enabled, and its
+	/// presence will cause deserialization to fail on versions of LDK prior to 0.0.116.
payment_data: Option<msgs::FinalOnionHopData>,
+ /// Preimage for this onion payment. This preimage is provided by the sender and will be
+ /// used to settle the spontaneous payment.
payment_preimage: PaymentPreimage,
+ /// Additional data which we (allegedly) instructed the sender to include in the onion.
+ ///
+ /// For HTLCs received by LDK, this will ultimately bubble back up as
+ /// [`RecipientOnionFields::payment_metadata`].
payment_metadata: Option<Vec<u8>>,
- incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
- /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+ /// CLTV expiry of the received HTLC.
+ ///
+ /// Used to track when we should expire pending HTLCs that go unclaimed.
+ incoming_cltv_expiry: u32,
+ /// Custom TLVs which were set by the sender.
+ ///
+ /// For HTLCs received by LDK, these will ultimately bubble back up as
+ /// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
},
}
+/// Information used to forward or fail an HTLC that is being forwarded within a blinded path.
+#[derive(Clone, Copy, Hash, PartialEq, Eq)]
+pub struct BlindedForward {
+ /// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
+ /// onion payload if we're the introduction node. Useful for calculating the next hop's
+ /// [`msgs::UpdateAddHTLC::blinding_point`].
+ pub inbound_blinding_point: PublicKey,
+ // Another field will be added here when we support forwarding as a non-intro node.
+}
+
+impl PendingHTLCRouting {
+ // Used to override the onion failure code and data if the HTLC is blinded.
+ fn blinded_failure(&self) -> Option<BlindedFailure> {
+ // TODO: needs update when we support receiving to multi-hop blinded paths
+ if let Self::Forward { blinded: Some(_), .. } = self {
+ Some(BlindedFailure::FromIntroductionNode)
+ } else {
+ None
+ }
+ }
+}
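+
+// Rough sketch of how these variants are consumed (see `process_pending_htlc_forwards`), given a
+// `routing: PendingHTLCRouting`:
+//
+//	match routing {
+//		PendingHTLCRouting::Forward { short_channel_id, .. } => { /* forward over the outbound channel */ },
+//		PendingHTLCRouting::Receive { payment_data, .. } => { /* validate against an invoice we generated */ },
+//		PendingHTLCRouting::ReceiveKeysend { payment_preimage, .. } => { /* claim with the included preimage */ },
+//	}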
+
+/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
+/// should go next.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) struct PendingHTLCInfo {
- pub(super) routing: PendingHTLCRouting,
- pub(super) incoming_shared_secret: [u8; 32],
- payment_hash: PaymentHash,
- /// Amount received
- pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
- /// Sender intended amount to forward or receive (actual amount received
- /// may overshoot this in either case)
- pub(super) outgoing_amt_msat: u64,
- pub(super) outgoing_cltv_value: u32,
- /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
- /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
- pub(super) skimmed_fee_msat: Option<u64>,
+pub struct PendingHTLCInfo {
+ /// Further routing details based on whether the HTLC is being forwarded or received.
+ pub routing: PendingHTLCRouting,
+	/// The onion shared secret we build with the sender, used to decrypt the onion.
+ ///
+ /// This is later used to encrypt failure packets in the event that the HTLC is failed.
+ pub incoming_shared_secret: [u8; 32],
+ /// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
+ pub payment_hash: PaymentHash,
+ /// Amount received in the incoming HTLC.
+ ///
+ /// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
+ /// versions.
+ pub incoming_amt_msat: Option<u64>,
+	/// The amount the sender indicated should be forwarded on to the next hop or, for received
+	/// payments, the amount the sender intended for us to receive.
+ ///
+ /// If the received amount is less than this for received payments, an intermediary hop has
+ /// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
+ /// it along another path).
+ ///
+ /// Because nodes can take less than their required fees, and because senders may wish to
+ /// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
+	/// received payments. In such cases, recipients must handle this HTLC as if
+	/// [`Self::outgoing_amt_msat`] had been received.
+ pub outgoing_amt_msat: u64,
+ /// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
+ /// should have been set on the received HTLC for received payments).
+ pub outgoing_cltv_value: u32,
+ /// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
+ ///
+	/// If this is a forwarded payment, this is the fee we are taking before forwarding the
+ /// HTLC.
+ ///
+ /// If this is a received payment, this is the fee that our counterparty took.
+ ///
+ /// This is used to allow LSPs to take fees as a part of payments, without the sender having to
+ /// shoulder them.
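+	///
+	/// For example (hypothetical numbers): if, as a forwarding LSP, we take a 500 msat skim on
+	/// top of a 1000 msat routing fee, then `incoming_amt_msat - outgoing_amt_msat` is 1500 and
+	/// `skimmed_fee_msat` is `Some(500)`.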
+ pub skimmed_fee_msat: Option<u64>,
}
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
},
}
+// Used for failing blinded HTLCs backwards correctly.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+enum BlindedFailure {
+ FromIntroductionNode,
+ // Another variant will be added here for non-intro nodes.
+}
+
/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
htlc_id: u64,
incoming_packet_shared_secret: [u8; 32],
phantom_shared_secret: Option<[u8; 32]>,
+ blinded_failure: Option<BlindedFailure>,
// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
// channel with a preimage provided by the forward channel.
user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
cltv_expiry: val.cltv_expiry,
value_msat: val.value,
+ counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0),
}
}
}
/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
pub(crate) enum SentHTLCId {
PreviousHopData { short_channel_id: u64, htlc_id: u64 },
- OutboundRoute { session_priv: SecretKey },
+ OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
}
impl SentHTLCId {
pub(crate) fn from_source(source: &HTLCSource) -> Self {
htlc_id: hop_data.htlc_id,
},
HTLCSource::OutboundRoute { session_priv, .. } =>
- Self::OutboundRoute { session_priv: *session_priv },
+ Self::OutboundRoute { session_priv: session_priv.secret_bytes() },
}
}
}
}
}
-struct InboundOnionErr {
- err_code: u16,
- err_data: Vec<u8>,
- msg: &'static str,
-}
-
/// This enum is used to specify which error data to send to peers when failing back an HTLC
/// using [`ChannelManager::fail_htlc_backwards_with_reason`].
///
#[inline]
fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
- let action = if let (Some(_), ..) = &shutdown_res {
+ let action = if shutdown_res.monitor_update.is_some() {
// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
// should disconnect our peer such that we force them to broadcast their latest
// commitment upon reconnecting.
/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
/// of [`KeysManager`] and [`DefaultRouter`].
///
-/// This is not exported to bindings users as Arcs don't make sense in bindings
+/// This is not exported to bindings users as type aliases aren't supported in most languages.
+#[cfg(not(c_bindings))]
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
Arc<M>,
Arc<T>,
/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
/// of [`KeysManager`] and [`DefaultRouter`].
///
-/// This is not exported to bindings users as Arcs don't make sense in bindings
+/// This is not exported to bindings users as type aliases aren't supported in most languages.
+#[cfg(not(c_bindings))]
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
ChannelManager<
&'a M,
/// A type implementing [`WriteableEcdsaChannelSigner`].
type Signer: WriteableEcdsaChannelSigner + Sized;
/// A type implementing [`SignerProvider`] for [`Self::Signer`].
- type SignerProvider: SignerProvider<Signer = Self::Signer> + ?Sized;
+	type SignerProvider: SignerProvider<EcdsaSigner = Self::Signer> + ?Sized;
/// A type that may be dereferenced to [`Self::SignerProvider`].
type SP: Deref<Target = Self::SignerProvider>;
/// A type implementing [`FeeEstimator`].
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
type ES = ES;
type NodeSigner = NS::Target;
type NS = NS;
- type Signer = <SP::Target as SignerProvider>::Signer;
+ type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
type SignerProvider = SP::Target;
type SP = SP;
type FeeEstimator = F::Target;
//
pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
}
}
- log_error!($self.logger, "{}", err.err);
+ let logger = WithContext::from(
+ &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
+ );
+ log_error!(logger, "{}", err.err);
if let msgs::ErrorAction::IgnoreError = err.action {
} else {
msg_events.push(events::MessageSendEvent::HandleError {
},
}
} };
- ($self: ident, $internal: expr) => {
- match $internal {
- Ok(res) => Ok(res),
- Err((chan, msg_handle_err)) => {
- let counterparty_node_id = chan.get_counterparty_node_id();
- handle_error!($self, Err(msg_handle_err), counterparty_node_id).map_err(|err| (chan, err))
- },
- }
- };
}
macro_rules! update_maps_on_chan_removal {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
},
ChannelError::Close(msg) => {
- log_error!($self.logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
+ let logger = WithChannelContext::from(&$self.logger, &$channel.context);
+ log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
let shutdown_res = $channel.context.force_shutdown(true);
let user_id = $channel.context.get_user_id();
macro_rules! handle_monitor_update_completion {
($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
- let mut updates = $chan.monitor_updating_restored(&$self.logger,
+ let logger = WithChannelContext::from(&$self.logger, &$chan.context);
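+		// Note the double-reference below: `monitor_updating_restored` takes a logger as
+		// `L: Deref<Target: Logger>`, which `&WithChannelContext` satisfies.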
+ let mut updates = $chan.monitor_updating_restored(&&logger,
&$self.node_signer, $self.chain_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height());
let counterparty_node_id = $chan.context.get_counterparty_node_id();
macro_rules! handle_new_monitor_update {
($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
+ let logger = WithChannelContext::from(&$self.logger, &$chan.context);
match $update_res {
ChannelMonitorUpdateStatus::UnrecoverableError => {
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
- log_error!($self.logger, "{}", err_str);
+ log_error!(logger, "{}", err_str);
panic!("{}", err_str);
},
ChannelMonitorUpdateStatus::InProgress => {
- log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+ log_debug!(logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
&$chan.context.channel_id());
false
},
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
/// connection is available, the outbound `open_channel` message may fail to send, resulting in
/// the channel eventually being silently forgotten (dropped on reload).
///
+ /// If `temporary_channel_id` is specified, it will be used as the temporary channel ID of the
+ /// channel. Otherwise, a random one will be generated for you.
+ ///
/// Returns the new Channel's temporary `channel_id`. This ID will appear as
/// [`Event::FundingGenerationReady::temporary_channel_id`] and in
/// [`ChannelDetails::channel_id`] until after
/// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
/// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
/// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
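+	///
+	/// For example, to open a channel with a caller-selected temporary ID (a minimal sketch;
+	/// `channel_manager`, `keys_manager`, and `their_network_key` are assumed to be in scope):
+	///
+	/// ```ignore
+	/// let temporary_channel_id = ChannelId::temporary_from_entropy_source(&keys_manager);
+	/// let channel_id = channel_manager.create_channel(
+	/// 	their_network_key, 1_000_000, 0, 42, Some(temporary_channel_id), None,
+	/// )?;
+	/// ```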
- pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
+ pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
if channel_value_satoshis < 1000 {
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
.ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
let mut peer_state = peer_state_mutex.lock().unwrap();
+
+ if let Some(temporary_channel_id) = temporary_channel_id {
+ if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
+ return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
+ }
+ }
+
let channel = {
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let their_features = &peer_state.latest_features;
let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
their_features, channel_value_satoshis, push_msat, user_channel_id, config,
- self.best_block.read().unwrap().height(), outbound_scid_alias)
+ self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id)
{
Ok(res) => res,
Err(e) => {
fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
let mut shutdown_result = None;
- loop {
+
+ {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let funding_txo_opt = chan.context.get_funding_txo();
let their_features = &peer_state.latest_features;
- let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
let (shutdown_msg, mut monitor_update_opt, htlcs) =
chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
if let Some(monitor_update) = monitor_update_opt.take() {
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
- break;
- }
-
- if chan.is_shutdown() {
- if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
- }
- self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
- shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
- }
}
- break;
+ } else {
+ self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
+ let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
}
},
hash_map::Entry::Vacant(_) => {
- // If we reach this point, it means that the channel_id either refers to an unfunded channel or
- // it does not exist for this peer. Either way, we can attempt to force-close it.
- //
- // An appropriate error will be returned for non-existence of the channel if that's the case.
- mem::drop(peer_state_lock);
- mem::drop(per_peer_state);
- return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with id {} not found for the passed counterparty node_id {}",
+ channel_id, counterparty_node_id,
+ )
+ });
},
}
}
self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
- fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+ fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
- let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
- log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
- for htlc_source in failed_htlcs.drain(..) {
+ let logger = WithContext::from(
+ &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
+ );
+ log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+ for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
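+			// 0x4000 | 8 is `permanent_channel_failure` (PERM | 8) per BOLT 4.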
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+ if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
}
let mut shutdown_results = Vec::new();
- if let Some(txid) = unbroadcasted_batch_funding_txid {
+ if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
let per_peer_state = self.per_peer_state.read().unwrap();
} else {
ClosureReason::HolderForceClosed
};
+ let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
- log_error!(self.logger, "Force-closing channel {}", channel_id);
+ log_error!(logger, "Force-closing channel {}", channel_id);
self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
mem::drop(peer_state);
},
}
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
- log_error!(self.logger, "Force-closing channel {}", &channel_id);
+ log_error!(logger, "Force-closing channel {}", &channel_id);
// N.B. that we don't send any channel close event here: we
// don't have a user_channel_id, and we never sent any opening
// events anyway.
}
}
- fn construct_fwd_pending_htlc_info(
- &self, msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32],
- new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32],
- next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
- ) -> Result<PendingHTLCInfo, InboundOnionErr> {
- debug_assert!(next_packet_pubkey_opt.is_some());
- let outgoing_packet = msgs::OnionPacket {
- version: 0,
- public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
- hop_data: new_packet_bytes,
- hmac: hop_hmac,
- };
-
- let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data {
- msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
- (short_channel_id, amt_to_forward, outgoing_cltv_value),
- msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
- return Err(InboundOnionErr {
- msg: "Final Node OnionHopData provided for us as an intermediary node",
- err_code: 0x4000 | 22,
- err_data: Vec::new(),
- }),
- };
-
- Ok(PendingHTLCInfo {
- routing: PendingHTLCRouting::Forward {
- onion_packet: outgoing_packet,
- short_channel_id,
- },
- payment_hash: msg.payment_hash,
- incoming_shared_secret: shared_secret,
- incoming_amt_msat: Some(msg.amount_msat),
- outgoing_amt_msat: amt_to_forward,
- outgoing_cltv_value,
- skimmed_fee_msat: None,
- })
- }
-
- fn construct_recv_pending_htlc_info(
- &self, hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash,
- amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
- counterparty_skimmed_fee_msat: Option<u64>,
- ) -> Result<PendingHTLCInfo, InboundOnionErr> {
- let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data {
- msgs::InboundOnionPayload::Receive {
- payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
- } =>
- (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
- msgs::InboundOnionPayload::BlindedReceive {
- amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
- } => {
- let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
- (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
- }
- msgs::InboundOnionPayload::Forward { .. } => {
- return Err(InboundOnionErr {
- err_code: 0x4000|22,
- err_data: Vec::new(),
- msg: "Got non final data with an HMAC of 0",
- })
- },
- };
- // final_incorrect_cltv_expiry
- if outgoing_cltv_value > cltv_expiry {
- return Err(InboundOnionErr {
- msg: "Upstream node set CLTV to less than the CLTV set by the sender",
- err_code: 18,
- err_data: cltv_expiry.to_be_bytes().to_vec()
- })
- }
- // final_expiry_too_soon
- // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
- // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
- //
- // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
- // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
- // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
- let current_height: u32 = self.best_block.read().unwrap().height();
- if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
- let mut err_data = Vec::with_capacity(12);
- err_data.extend_from_slice(&amt_msat.to_be_bytes());
-			err_data.extend_from_slice(&current_height.to_be_bytes());
- return Err(InboundOnionErr {
- err_code: 0x4000 | 15, err_data,
- msg: "The final CLTV expiry is too soon to handle",
- });
- }
- if (!allow_underpay && onion_amt_msat > amt_msat) ||
- (allow_underpay && onion_amt_msat >
- amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
- {
- return Err(InboundOnionErr {
- err_code: 19,
- err_data: amt_msat.to_be_bytes().to_vec(),
- msg: "Upstream node sent less than we were supposed to receive in payment",
- });
- }
-
- let routing = if let Some(payment_preimage) = keysend_preimage {
- // We need to check that the sender knows the keysend preimage before processing this
- // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
- // could discover the final destination of X, by probing the adjacent nodes on the route
- // with a keysend payment of identical payment hash to X and observing the processing
- // time discrepancies due to a hash collision with X.
- let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
- if hashed_preimage != payment_hash {
- return Err(InboundOnionErr {
- err_code: 0x4000|22,
- err_data: Vec::new(),
- msg: "Payment preimage didn't match payment hash",
- });
- }
- if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() {
- return Err(InboundOnionErr {
- err_code: 0x4000|22,
- err_data: Vec::new(),
- msg: "We don't support MPP keysend payments",
- });
- }
- PendingHTLCRouting::ReceiveKeysend {
- payment_data,
- payment_preimage,
- payment_metadata,
- incoming_cltv_expiry: outgoing_cltv_value,
- custom_tlvs,
- }
- } else if let Some(data) = payment_data {
- PendingHTLCRouting::Receive {
- payment_data: data,
- payment_metadata,
- incoming_cltv_expiry: outgoing_cltv_value,
- phantom_shared_secret,
- custom_tlvs,
- }
- } else {
- return Err(InboundOnionErr {
- err_code: 0x4000|0x2000|3,
- err_data: Vec::new(),
- msg: "We require payment_secrets",
- });
- };
- Ok(PendingHTLCInfo {
- routing,
- payment_hash,
- incoming_shared_secret: shared_secret,
- incoming_amt_msat: Some(amt_msat),
- outgoing_amt_msat: onion_amt_msat,
- outgoing_cltv_value,
- skimmed_fee_msat: counterparty_skimmed_fee_msat,
- })
- }
-
fn decode_update_add_htlc_onion(
- &self, msg: &msgs::UpdateAddHTLC
- ) -> Result<(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg> {
- macro_rules! return_malformed_err {
- ($msg: expr, $err_code: expr) => {
- {
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
- failure_code: $err_code,
- }));
- }
- }
- }
-
- if let Err(_) = msg.onion_routing_packet.public_key {
- return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
- }
-
- let shared_secret = self.node_signer.ecdh(
- Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None
- ).unwrap().secret_bytes();
+ &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
+ ) -> Result<
+ (onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
+ > {
+ let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(
+ msg, &self.node_signer, &self.logger, &self.secp_ctx
+ )?;
+
+ let is_blinded = match next_hop {
+ onion_utils::Hop::Forward {
+ next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+ } => true,
+ _ => false, // TODO: update this when we support receiving to multi-hop blinded paths
+ };
- if msg.onion_routing_packet.version != 0 {
- //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
- //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
- //the hash doesn't really serve any purpose - in the case of hashing all data, the
- //receiving node would have to brute force to figure out which version was put in the
- //packet by the node that send us the message, in the case of hashing the hop_data, the
- //node knows the HMAC matched, so they already know what is there...
- return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
- }
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ log_info!(
+ WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
+ "Failed to accept/forward incoming HTLC: {}", $msg
+ );
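+				// Per BOLT 4 route blinding, failures within a blinded path are reported as
+				// `invalid_onion_blinding` with 32 zero bytes of data, rather than the real
+				// error, to avoid leaking information about the blinded path to the sender.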
+ let (err_code, err_data) = if is_blinded {
+ (INVALID_ONION_BLINDING, &[0; 32][..])
+ } else { ($err_code, $data) };
return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
- reason: HTLCFailReason::reason($err_code, $data.to_vec())
+ reason: HTLCFailReason::reason(err_code, err_data.to_vec())
.get_encrypted_failure_packet(&shared_secret, &None),
}));
}
}
}
- let next_hop = match onion_utils::decode_next_payment_hop(
- shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
- msg.payment_hash, &self.node_signer
- ) {
- Ok(res) => res,
- Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
- return_malformed_err!(err_msg, err_code);
- },
- Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
- return_err!(err_msg, err_code, &[0; 0]);
- },
- };
- let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value, next_packet_pk_opt) = match next_hop {
- onion_utils::Hop::Forward {
- next_hop_data: msgs::InboundOnionPayload::Forward {
- short_channel_id, amt_to_forward, outgoing_cltv_value
- }, ..
- } => {
- let next_packet_pk = onion_utils::next_hop_pubkey(&self.secp_ctx,
- msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
- (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_packet_pk))
- },
- // We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
- // inbound channel's state.
- onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
- onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
- onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
- {
- return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
- }
+ let NextPacketDetails {
+ next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value
+ } = match next_packet_details_opt {
+ Some(next_packet_details) => next_packet_details,
+ // it is a receive, so no need for outbound checks
+ None => return Ok((next_hop, shared_secret, None)),
};
// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
}
chan_update_opt
} else {
- if (msg.cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
- // We really should set `incorrect_cltv_expiry` here but as we're not
- // forwarding over a real channel we can't generate a channel_update
- // for it. Instead we just return a generic temporary_node_failure.
- break Some((
- "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
- 0x2000 | 2, None,
- ));
- }
None
};
let cur_height = self.best_block.read().unwrap().height() + 1;
- // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
- // but we want to be robust wrt to counterparty packet sanitization (see
- // HTLC_FAIL_BACK_BUFFER rationale).
- if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
- break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
- }
- if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
- break Some(("CLTV expiry is too far in the future", 21, None));
- }
- // If the HTLC expires ~now, don't bother trying to forward it to our
- // counterparty. They should fail it anyway, but we don't want to bother with
- // the round-trips or risk them deciding they definitely want the HTLC and
- // force-closing to ensure they get it if we're offline.
- // We previously had a much more aggressive check here which tried to ensure
- // our counterparty receives an HTLC which has *our* risk threshold met on it,
- // but there is no need to do that, and since we're a bit conservative with our
- // risk threshold it just results in failing to forward payments.
- if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
- break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
+
+ if let Err((err_msg, code)) = check_incoming_htlc_cltv(
+ cur_height, outgoing_cltv_value, msg.cltv_expiry
+ ) {
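+				// The 0x1000 bit is BOLT 4's UPDATE flag, indicating the failure should be
+				// accompanied by a `channel_update` for the failing channel.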
+ if code & 0x1000 != 0 && chan_update_opt.is_none() {
+ // We really should set `incorrect_cltv_expiry` here but as we're not
+ // forwarding over a real channel we can't generate a channel_update
+ // for it. Instead we just return a generic temporary_node_failure.
+ break Some((err_msg, 0x2000 | 2, None))
+ }
+ let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None };
+ break Some((err_msg, code, chan_update_opt));
}
break None;
}
return_err!(err, code, &res.0[..]);
}
- Ok((next_hop, shared_secret, next_packet_pk_opt))
+ Ok((next_hop, shared_secret, Some(next_packet_pubkey)))
}
fn construct_pending_htlc_status<'a>(
- &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
- allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+ &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
+ decoded_hop: onion_utils::Hop, allow_underpay: bool,
+ next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
) -> PendingHTLCStatus {
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+ log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
match decoded_hop {
onion_utils::Hop::Receive(next_hop_data) => {
// OUR PAYMENT!
- match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
- msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat)
+ let current_height: u32 = self.best_block.read().unwrap().height();
+ match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
+ msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
+ current_height, self.default_configuration.accept_mpp_keysend)
{
Ok(info) => {
// Note that we could obviously respond immediately with an update_fulfill_htlc
}
},
onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
- match self.construct_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
+ match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
Ok(info) => PendingHTLCStatus::Forward(info),
Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
if chan.context.get_short_channel_id().is_none() {
return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
}
- log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
self.get_channel_update_for_unicast(chan)
}
/// [`channel_update`]: msgs::ChannelUpdate
/// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Attempting to generate channel update for channel {}", &chan.context.channel_id());
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
}
fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Generating channel update for channel {}", &chan.context.channel_id());
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let enabled = chan.context.is_usable() && match chan.channel_update_status() {
} = args;
// The top-level caller should hold the total_consistency_lock read lock.
debug_assert!(self.total_consistency_lock.try_write().is_err());
-
- log_trace!(self.logger,
- "Attempting to send payment with payment hash {} along path with next hop {}",
- payment_hash, path.hops.first().unwrap().short_channel_id);
let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
- let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
- .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
-
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
- .map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
+ let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
+ &self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
+ payment_hash, keysend_preimage, prng_seed
+ ).map_err(|e| {
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
+ e
+ })?;
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
- None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
+ None => {
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
+ return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
+ },
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
};
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+ log_trace!(logger,
+ "Attempting to send payment with payment hash {} along path with next hop {}",
+ payment_hash, path.hops.first().unwrap().short_channel_id);
+
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
}
let funding_txo = chan.context.get_funding_txo().unwrap();
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
htlc_cltv, HTLCSource::OutboundRoute {
path: path.clone(),
session_priv: session_priv.clone(),
first_hop_htlc_msat: htlc_msat,
payment_id,
- }, onion_packet, None, &self.fee_estimator, &self.logger);
+ }, onion_packet, None, &self.fee_estimator, &&logger);
match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
Some(monitor_update) => {
match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
}
return Ok(());
};
-
match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
Ok(_) => unreachable!(),
Err(e) => {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, msg) = match peer_state.channel_by_id.remove(temporary_channel_id) {
- Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
+ let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+ Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
- let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger)
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
} else { unreachable!(); });
match funding_res {
- Ok((chan, funding_msg)) => (chan, funding_msg),
+ Ok(funding_msg) => (chan, funding_msg),
Err((chan, err)) => {
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
-
let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
return Err(APIError::ChannelUnavailable {
err: "Signer refused to sign the initial commitment transaction".to_owned()
}),
};
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
- node_id: chan.context.get_counterparty_node_id(),
- msg,
- });
+ if let Some(msg) = msg_opt {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg,
+ });
+ }
match peer_state.channel_by_id.entry(chan.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
panic!("Generated duplicate funding txid?");
if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
}
- e.insert(ChannelPhase::Funded(chan));
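+				// The channel remains `UnfundedOutboundV1` until we receive the counterparty's
+				// `funding_signed`; `get_funding_created` may also have returned no message yet
+				// (e.g. with an asynchronous signer).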
+ e.insert(ChannelPhase::UnfundedOutboundV1(chan));
}
}
Ok(())
/// Return values are identical to [`Self::funding_transaction_generated`], respective to
/// each individual channel and transaction output.
///
- /// Do NOT broadcast the funding transaction yourself. This batch funding transcaction
+ /// Do NOT broadcast the funding transaction yourself. This batch funding transaction
/// will only be broadcast when we have safely received and persisted the counterparty's
/// signature for each channel.
///
// lower than the next block height. However, the modules constituting our Lightning
// node might not have perfect sync about their blockchain views. Thus, if the wallet
// module is ahead of LDK, only allow one more block of headroom.
- if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 {
+ if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
+ funding_transaction.lock_time.is_block_height() &&
+ funding_transaction.lock_time.to_consensus_u32() > height + 1
+ {
result = result.and(Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
}));
btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
}
});
- for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() {
+ for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
result = result.and_then(|_| self.funding_transaction_generated_intern(
temporary_channel_id,
counterparty_node_id,
err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
next_hop_channel_id, next_node_id)
}),
- None => return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not found for the passed counterparty node_id {}",
- next_hop_channel_id, next_node_id)
- })
+ None => {
+ let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
+ next_hop_channel_id, next_node_id);
+ let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+ log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
+ return Err(APIError::ChannelUnavailable {
+ err: error
+ })
+ }
}
};
})?;
let routing = match payment.forward_info.routing {
- PendingHTLCRouting::Forward { onion_packet, .. } => {
- PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid }
+ PendingHTLCRouting::Forward { onion_packet, blinded, .. } => {
+ PendingHTLCRouting::Forward {
+ onion_packet, blinded, short_channel_id: next_hop_scid
+ }
},
_ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
};
htlc_id: payment.prev_htlc_id,
incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
+ blinded_failure: payment.forward_info.routing.blinded_failure(),
});
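+		// 0x4000 | 10 is `unknown_next_peer` (PERM | 10) per BOLT 4.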
let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
for (short_chan_id, mut pending_forwards) in forward_htlcs {
if short_chan_id != 0 {
+ let mut forwarding_counterparty = None;
macro_rules! forwarding_channel_not_found {
() => {
for forward_info in pending_forwards.drain(..) {
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+ log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
phantom_shared_secret: $phantom_ss,
+ blinded_failure: routing.blinded_failure(),
});
let reason = if $next_hop_unknown {
}
}
}
- if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
+ if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
- let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
+ let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array();
// In this scenario, the phantom would have sent us an
// `update_fail_malformed_htlc`, meaning here we encrypt the error as
// if it came from us (the second-to-last hop) but contains the sha256
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
- match self.construct_recv_pending_htlc_info(hop_data,
+ let current_height: u32 = self.best_block.read().unwrap().height();
+ match create_recv_pending_htlc_info(hop_data,
incoming_shared_secret, payment_hash, outgoing_amt_msat,
- outgoing_cltv_value, Some(phantom_shared_secret), false, None)
+ outgoing_cltv_value, Some(phantom_shared_secret), false, None,
+ current_height, self.default_configuration.accept_mpp_keysend)
{
Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
continue;
}
};
+ forwarding_counterparty = Some(counterparty_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
- routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
+ routing: PendingHTLCRouting::Forward {
+ onion_packet, blinded, ..
+ }, skimmed_fee_msat, ..
},
}) => {
- log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
+ log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
incoming_packet_shared_secret: incoming_shared_secret,
// Phantom payments are only PendingHTLCRouting::Receive.
phantom_shared_secret: None,
+ blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+ });
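+ // If forwarding within a blinded path, unwrap one layer of route blinding:
+ // ECDH our node key against the inbound blinding point, then derive the
+ // blinding point the next hop should see in its update_add_htlc.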
+ let next_blinding_point = blinded.and_then(|b| {
+ let encrypted_tlvs_ss = self.node_signer.ecdh(
+ Recipient::Node, &b.inbound_blinding_point, None
+ ).unwrap().secret_bytes();
+ onion_utils::next_hop_pubkey(
+ &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+ ).ok()
});
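+ // Note the double reference: `queue_add_htlc` accepts any `&L` whose target
+ // implements `Logger`, and `&logger` derefs to our `WithChannelContext`.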
if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, skimmed_fee_msat, &self.fee_estimator,
- &self.logger)
+ onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+ &&logger)
{
if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+ log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
} else {
panic!("Stated return value requirements in send_htlc() were not met");
}
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
- log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
if let Err(e) = chan.queue_fail_htlc(
- htlc_id, err_packet, &self.logger
+ htlc_id, err_packet, &&logger
) {
if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
} else {
panic!("Stated return value requirements in queue_fail_htlc() were not met");
}
skimmed_fee_msat, ..
}
}) => {
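+ // Snapshot the blinded-failure mode first, as `routing` is consumed by
+ // the match below.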
+ let blinded_failure = routing.blinded_failure();
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => {
let _legacy_hop_data = Some(payment_data.clone());
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
phantom_shared_secret,
+ blinded_failure,
},
// We differentiate the received value from the sender intended value
// if possible so that we don't prematurely mark MPP payments complete
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
phantom_shared_secret,
+ blinded_failure: None,
}), payment_hash,
HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
HTLCDestination::FailedPayment { payment_hash: $payment_hash },
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
+
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
- log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+ log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
}
return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
- log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+ log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersistNoEvents;
}
- log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+ log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
+ chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
NotifyOption::DoPersist
}
| {
context.maybe_expire_prev_config();
if unfunded_context.should_expire_unfunded_channel() {
- log_error!(self.logger,
+ let logger = WithChannelContext::from(&self.logger, context);
+ log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
chan.context.maybe_expire_prev_config();
if chan.should_disconnect_peer_awaiting_response() {
- log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
counterparty_node_id, chan_id);
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
- log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id));
+ log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
&self.pending_events, &self.logger)
{ self.push_pending_forwards_ev(); }
},
- HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint, .. }) => {
- log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", &payment_hash, onion_error);
- let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
+ HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
+ ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+ }) => {
+ log_trace!(
+ WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+ "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
+ if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
+ );
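+ // Within a blinded path we must not reveal which hop failed or why, so
+ // substitute a constant invalid_onion_blinding error (with zeroed data)
+ // before encrypting the failure back towards the sender.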
+ let err_packet = match blinded_failure {
+ Some(BlindedFailure::FromIntroductionNode) => {
+ let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
+ blinded_onion_error.get_encrypted_failure_packet(
+ incoming_packet_shared_secret, phantom_shared_secret
+ )
+ },
+ None => {
+ onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret)
+ }
+ };
let mut push_forward_ev = false;
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
}
fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
- let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
}
if valid_mpp {
for htlc in sources.drain(..) {
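+ // Copy the channel id out now for logging; `htlc.prev_hop` is moved into
+ // `claim_funds_from_hop` below.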
+ let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+ let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
+ log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
} else { errs.push((pk, err)); }
}
}
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let counterparty_node_id = chan.context.get_counterparty_node_id();
- let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
match fulfill_res {
UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
if let Some(action) = completion_action(Some(htlc_value_msat), false) {
- log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
chan_id, action);
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
};
mem::drop(peer_state_lock);
- log_trace!(self.logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
+ log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
chan_id, action);
let (node_id, funding_outpoint, blocker) =
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
// with a preimage we *must* somehow manage to propagate it to the upstream
// channel, or we must have an ability to receive the same event and try
// again on restart.
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
} else {
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
- log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+ let logger = WithChannelContext::from(&self.logger, &channel.context);
+ log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
if raa.is_some() { "an" } else { "no" },
if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
}
if let Some(tx) = funding_broadcastable {
- log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+ log_info!(logger, "Broadcasting funding transaction with txid {}", tx.txid());
self.tx_broadcaster.broadcast_transactions(&[&tx]);
}
pending.retain(|upd| upd.update_id > highest_applied_update_id);
pending.len()
} else { 0 };
- log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
+ let logger = WithChannelContext::from(&self.logger, &channel.context);
+ log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
remaining_in_flight);
if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, funding_msg, monitor) =
+ let (chan, funding_msg_opt, monitor) =
match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
- match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
+ let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
+ match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
Ok(res) => res,
Err((mut inbound_chan, err)) => {
// We've already removed this inbound channel from the map in `PeerState`
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
};
- match peer_state.channel_by_id.entry(funding_msg.channel_id) {
+ match peer_state.channel_by_id.entry(chan.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
- Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
+ Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Already had channel with the new channel_id".to_owned(),
+ chan.context.channel_id()
+ ))
},
hash_map::Entry::Vacant(e) => {
let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
hash_map::Entry::Occupied(_) => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
- funding_msg.channel_id))
+ chan.context.channel_id()))
},
hash_map::Entry::Vacant(i_e) => {
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
// accepted payment from yet. We do, however, need to wait to send our channel_ready
// until we have persisted our monitor.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
- node_id: counterparty_node_id.clone(),
- msg: funding_msg,
- });
+ if let Some(msg) = funding_msg_opt {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+ node_id: counterparty_node_id.clone(),
+ msg,
+ });
+ }
if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
}
Ok(())
} else {
- log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
+ let channel_id = match funding_msg_opt {
+ Some(msg) => msg.channel_id,
+ None => chan.context.channel_id(),
+ };
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
- funding_msg.channel_id));
+ channel_id));
}
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan_phase_entry) => {
- match chan_phase_entry.get_mut() {
- ChannelPhase::Funded(ref mut chan) => {
- let monitor = try_chan_phase_entry!(self,
- chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
- if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
- handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
- Ok(())
- } else {
- try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
+ hash_map::Entry::Occupied(chan_phase_entry) => {
+ if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
+ let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
+ let logger = WithContext::from(
+ &self.logger,
+ Some(chan.context.get_counterparty_node_id()),
+ Some(chan.context.channel_id())
+ );
+ let res =
+ chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
+ match res {
+ Ok((chan, monitor)) => {
+ if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
+ // We really should be able to insert here without doing a second
+ // lookup, but sadly rust stdlib doesn't currently allow keeping
+ // the original Entry around with the value removed.
+ let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
+ if let ChannelPhase::Funded(ref mut chan) = &mut chan {
+ handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
+ } else { unreachable!(); }
+ Ok(())
+ } else {
+ let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+ return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
+ }
+ },
+ Err((chan, e)) => {
+ debug_assert!(matches!(e, ChannelError::Close(_)),
+ "We don't have a channel anymore, so the error had better be a Close");
+ // We've already removed this outbound channel from the map in
+ // `PeerState` above so at this point we just need to clean up any
+ // lingering entries concerning this channel as it is safe to do so.
+ return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
}
- },
- _ => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
- },
+ }
+ } else {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
- self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+ self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
if let Some(announcement_sigs) = announcement_sigs_opt {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
+ log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
// counterparty's announcement_signatures. Thus, we only bother to send a
// channel_update here if the channel is not public, i.e. we're not sending an
// announcement_signatures.
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
+ log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: counterparty_node_id.clone(),
match phase {
ChannelPhase::Funded(chan) => {
if !chan.received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
msg.channel_id,
if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
}
},
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
let context = phase.context_mut();
- log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+ let logger = WithChannelContext::from(&self.logger, context);
+ log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
finish_shutdown = Some(chan.context_mut().force_shutdown(false));
}
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
- let mut shutdown_result = None;
- let unbroadcasted_batch_funding_txid;
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
- let (tx, chan_option) = {
+ let (tx, chan_option, shutdown_result) = {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
- let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+ let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+ debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
if let Some(msg) = closing_signed {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
// also implies there are no pending HTLCs left on the channel, so we can
// fully delete it from tracking (the channel monitor is still around to
// watch for old state broadcasts)!
- (tx, Some(remove_channel_phase!(self, chan_phase_entry)))
- } else { (tx, None) }
+ (tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+ } else { (tx, None, shutdown_result) }
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
}
};
if let Some(broadcast_tx) = tx {
- log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
+ let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
+ log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id), "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(ChannelPhase::Funded(chan)) = chan_option {
});
}
self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
- shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
}
mem::drop(per_peer_state);
if let Some(shutdown_result) = shutdown_result {
// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
// closing a channel), so any changes are likely to be lost on restart!
- let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
+ let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let pending_forward_info = match decoded_hop_res {
Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
- self.construct_pending_htlc_status(msg, shared_secret, next_hop,
- chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+ self.construct_pending_htlc_status(
+ msg, counterparty_node_id, shared_secret, next_hop,
+ chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
+ ),
Err(e) => PendingHTLCStatus::Fail(e)
};
let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
match pending_forward_info {
- PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
- let reason = if (error_code & 0x1000) != 0 {
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ ref incoming_shared_secret, ref routing, ..
+ }) => {
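+ // Failures within a blinded path are uniformly reported as
+ // invalid_onion_blinding, so the sender learns nothing about which
+ // check failed.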
+ let reason = if routing.blinded_failure().is_some() {
+ HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+ } else if (error_code & 0x1000) != 0 {
let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
HTLCFailReason::reason(real_code, error_data)
} else {
_ => pending_forward_info
}
};
- try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan_phase_entry);
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
- log_trace!(self.logger,
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
msg.channel_id);
peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
let funding_txo = chan.context.get_funding_txo();
- let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
+ let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
{
- let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
+ let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).to_byte_array());
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
match pending_intercepts.entry(intercept_id) {
hash_map::Entry::Vacant(entry) => {
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
+ let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+ log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
phantom_shared_secret: None,
+ blinded_failure: forward_info.routing.blinded_failure(),
});
failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
let funding_txo_opt = chan.context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
*counterparty_node_id)
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
- chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
+ chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &self.logger), chan_phase_entry);
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
- log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
// If nothing changed after applying their update, we don't need to bother
// persisting.
msg.channel_id
)
})?;
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
// freed HTLCs to fail backwards. If in the future we no longer drop pending
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
- msg, &self.logger, &self.node_signer, self.chain_hash,
+ msg, &&logger, &self.node_signer, self.chain_hash,
&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
let mut channel_update = None;
if let Some(msg) = responses.shutdown_msg {
}
},
hash_map::Entry::Vacant(_) => {
- log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
- log_bytes!(msg.channel_id.0));
+ log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
+ msg.channel_id);
// Unfortunately, lnd doesn't force close on errors
// (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
// One of the few ways to get an lnd counterparty to force close is by
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
+ let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
+ log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
} else {
- log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
+ log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
let counterparty_node_id = chan.context.get_counterparty_node_id();
let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
- chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger);
+ chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context));
if !holding_cell_failed_htlcs.is_empty() {
failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
}
has_update
}
+ /// When a call to a [`ChannelSigner`] method returns an error, this indicates that the signer
+ /// is (temporarily) unavailable, and the operation should be retried later.
+ ///
+ /// This method allows for that retry: it checks for any signer-pending messages that should
+ /// be reattempted, either in every channel or only in the specifically provided channel.
+ ///
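+ /// A sketch of the intended call pattern (the `channel_manager` handle is illustrative):
+ /// ```ignore
+ /// // Retry signer-pending messages on a single channel...
+ /// channel_manager.signer_unblocked(Some((counterparty_node_id, channel_id)));
+ /// // ...or across all channels with all peers.
+ /// channel_manager.signer_unblocked(None);
+ /// ```
+ ///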
+ /// [`ChannelSigner`]: crate::sign::ChannelSigner
+ #[cfg(test)] // This is only implemented for one signer method, and should be private until we
+ // actually finish implementing it fully.
+ pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
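+ // For a single channel, ask it for any messages still pending on the signer
+ // and queue whichever are now ready to send to the counterparty.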
+ let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| {
+ let node_id = phase.context().get_counterparty_node_id();
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ let msgs = chan.signer_maybe_unblocked(&self.logger);
+ if let Some(updates) = msgs.commitment_update {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id,
+ updates,
+ });
+ }
+ if let Some(msg) = msgs.funding_signed {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+ node_id,
+ msg,
+ });
+ }
+ if let Some(msg) = msgs.channel_ready {
+ send_channel_ready!(self, pending_msg_events, chan, msg);
+ }
+ }
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+ node_id,
+ msg,
+ });
+ }
+ }
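+ // Inbound unfunded channels have nothing signer-pending to retry in this
+ // partial implementation.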
+ ChannelPhase::UnfundedInboundV1(_) => {},
+ }
+ };
+
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some((counterparty_node_id, channel_id)) = channel_opt {
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+ unblock_chan(chan, &mut peer_state.pending_msg_events);
+ }
+ }
+ } else {
+ for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ for (_, chan) in peer_state.channel_by_id.iter_mut() {
+ unblock_chan(chan, &mut peer_state.pending_msg_events);
+ }
+ }
+ }
+ }
+
/// Check whether any channels have finished removing all pending updates after a shutdown
/// exchange and can now send a closing_signed.
/// Returns whether any closing_signed messages were generated.
peer_state.channel_by_id.retain(|channel_id, phase| {
match phase {
ChannelPhase::Funded(chan) => {
- let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
- match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
- Ok((msg_opt, tx_opt)) => {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
+ Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
if let Some(msg) = msg_opt {
has_update = true;
pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: chan.context.get_counterparty_node_id(), msg,
});
}
+ debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+ if let Some(shutdown_result) = shutdown_result_opt {
+ shutdown_results.push(shutdown_result);
+ }
if let Some(tx) = tx_opt {
// We're done with this channel. We got a closing_signed and sent back
// a closing_signed with a closing transaction to broadcast.
self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
- log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+ log_info!(logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, &chan.context);
- shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
false
} else { true }
},
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+ if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
/// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
/// reply path.
///
+ /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+ ///
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
pub fn create_offer_builder(
/// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
/// or if `amount_msats` is invalid.
///
+ /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+ ///
/// [`Refund`]: crate::offers::refund::Refund
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if offer.paths().is_empty() {
- let message = PendingOnionMessage {
- contents: OffersMessage::InvoiceRequest(invoice_request),
- destination: Destination::Node(offer.signing_pubkey()),
- reply_path: Some(reply_path),
- };
+ let message = new_pending_onion_message(
+ OffersMessage::InvoiceRequest(invoice_request),
+ Destination::Node(offer.signing_pubkey()),
+ Some(reply_path),
+ );
pending_offers_messages.push(message);
} else {
// Send as many invoice requests as there are paths in the offer (with an upper bound).
// one invoice for a given payment id will be paid, even if more than one is received.
const REQUEST_LIMIT: usize = 10;
for path in offer.paths().into_iter().take(REQUEST_LIMIT) {
- let message = PendingOnionMessage {
- contents: OffersMessage::InvoiceRequest(invoice_request.clone()),
- destination: Destination::BlindedPath(path.clone()),
- reply_path: Some(reply_path.clone()),
- };
+ let message = new_pending_onion_message(
+ OffersMessage::InvoiceRequest(invoice_request.clone()),
+ Destination::BlindedPath(path.clone()),
+ Some(reply_path.clone()),
+ );
pending_offers_messages.push(message);
}
}
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if refund.paths().is_empty() {
- let message = PendingOnionMessage {
- contents: OffersMessage::Invoice(invoice),
- destination: Destination::Node(refund.payer_id()),
- reply_path: Some(reply_path),
- };
+ let message = new_pending_onion_message(
+ OffersMessage::Invoice(invoice),
+ Destination::Node(refund.payer_id()),
+ Some(reply_path),
+ );
pending_offers_messages.push(message);
} else {
for path in refund.paths() {
- let message = PendingOnionMessage {
- contents: OffersMessage::Invoice(invoice.clone()),
- destination: Destination::BlindedPath(path.clone()),
- reply_path: Some(reply_path.clone()),
- };
+ let message = new_pending_onion_message(
+ OffersMessage::Invoice(invoice.clone()),
+ Destination::BlindedPath(path.clone()),
+ Some(reply_path.clone()),
+ );
pending_offers_messages.push(message);
}
}
/// operation. It will double-check that nothing *else* is also blocking the same channel from
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ let logger = WithContext::from(
+ &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+ );
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lck = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lck;
-
if let Some(blocker) = completed_blocker.take() {
// Only do this on the first iteration of the loop.
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
// Check that, while holding the peer lock, we don't have anything else
// blocking monitor updates for this channel. If we do, release the monitor
// update(s) when those blockers complete.
- log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
+ log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
&channel_funding_outpoint.to_channel_id());
break;
}
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
- log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+ log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
channel_funding_outpoint.to_channel_id());
handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
continue;
}
} else {
- log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+ log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
channel_funding_outpoint.to_channel_id());
}
}
}
} else {
- log_debug!(self.logger,
+ log_debug!(logger,
"Got a release post-RAA monitor update for peer {} but the channel is gone",
log_pubkey!(counterparty_node_id));
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
R::Target: Router,
L::Target: Logger,
{
- fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
{
let best_block = self.best_block.read().unwrap();
assert_eq!(best_block.block_hash(), header.prev_blockhash,
self.best_block_updated(header, height);
}
- fn block_disconnected(&self, header: &BlockHeader, height: u32) {
+ fn block_disconnected(&self, header: &Header, height: u32) {
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
R::Target: Router,
L::Target: Logger,
{
- fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
- self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
.map(|(a, b)| (a, Vec::new(), b)));
let last_best_block_height = self.best_block.read().unwrap().height();
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
- self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
}
}
- fn best_block_updated(&self, header: &BlockHeader, height: u32) {
+ fn best_block_updated(&self, header: &Header, height: u32) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
macro_rules! max_time {
($timestamp: expr) => {
});
}
- fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
+ fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
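+ // Each entry now also carries the height at which the funding transaction
+ // confirmed, letting callers judge how deep a reorg must be to unconfirm it.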
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
- if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
- res.push((funding_txo.txid, Some(block_hash)));
+ let txid_opt = chan.context.get_funding_txo();
+ let height_opt = chan.context.get_funding_tx_confirmation_height();
+ let hash_opt = chan.context.get_funding_tx_confirmed_in();
+ if let (Some(funding_txo), Some(conf_height), Some(block_hash)) = (txid_opt, height_opt, hash_opt) {
+ res.push((funding_txo.txid, conf_height, Some(block_hash)));
}
}
}
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.context.get_funding_txo() {
if funding_txo.txid == *txid {
- channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
+ channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context)).map(|()| (None, Vec::new(), None))
} else { Ok((None, Vec::new(), None)) }
} else { Ok((None, Vec::new(), None)) }
});
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
}
+ let logger = WithChannelContext::from(&self.logger, &channel.context);
if let Some(channel_ready) = channel_ready_opt {
send_channel_ready!(self, pending_msg_events, channel, channel_ready);
if channel.context.is_usable() {
- log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
+ log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: channel.context.get_counterparty_node_id(),
});
}
} else {
- log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
+ log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
}
}
}
if let Some(announcement_sigs) = announcement_sigs {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
+ log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
node_id: channel.context.get_counterparty_node_id(),
msg: announcement_sigs,
incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
outpoint: htlc.prev_funding_outpoint,
+ blinded_failure: htlc.forward_info.routing.blinded_failure(),
});
let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
- log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
+ let logger = WithContext::from(
+ &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+ );
+ log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
} else { true }
});
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
});
}
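+ // Quiescence and splicing are not yet supported; reject such messages with
+ // an explicit error rather than silently ignoring them.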
+ fn handle_stfu(&self, counterparty_node_id: &PublicKey, msg: &msgs::Stfu) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Quiescence not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Splicing not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Splicing not supported (splice_ack)".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_splice_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Splicing not supported (splice_locked)".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
- log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
- log_pubkey!(counterparty_node_id));
+ log_debug!(
+ WithContext::from(&self.logger, Some(*counterparty_node_id), None),
+ "Marking channels with {} disconnected and generating channel_updates.",
+ log_pubkey!(counterparty_node_id)
+ );
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
peer_state.channel_by_id.retain(|_, phase| {
let context = match phase {
ChannelPhase::Funded(chan) => {
- if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() {
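+						// Note the `&&logger` below: the channel method is generic over
+						// `L: Deref` with `L::Target: Logger`, and `&WithChannelContext`
+						// derefs to a `Logger` implementation, satisfying that bound.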
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
// We only retain funded channels that are not shutdown.
return true;
}
// Common Channel Establishment
&events::MessageSendEvent::SendChannelReady { .. } => false,
&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+ // Quiescence
+ &events::MessageSendEvent::SendStfu { .. } => false,
+ // Splicing
+ &events::MessageSendEvent::SendSplice { .. } => false,
+ &events::MessageSendEvent::SendSpliceAck { .. } => false,
+ &events::MessageSendEvent::SendSpliceLocked { .. } => false,
// Interactive Transaction Construction
&events::MessageSendEvent::SendTxAddInput { .. } => false,
&events::MessageSendEvent::SendTxAddOutput { .. } => false,
}
fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
if !init_msg.features.supports_static_remote_key() {
- log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
+ log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
return Err(());
}
}
}
- log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+ log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
None
}
).for_each(|chan| {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&self.logger),
+ msg: chan.get_channel_reestablish(&&logger),
});
});
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
(6, real_node_pubkey, required),
});
+impl_writeable_tlv_based!(BlindedForward, {
+ (0, inbound_blinding_point, required),
+});
+
impl_writeable_tlv_based_enum!(PendingHTLCRouting,
(0, Forward) => {
(0, onion_packet, required),
+ (1, blinded, option),
(2, short_channel_id, required),
},
(1, Receive) => {
(1, Fail),
);
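+// `BlindedFailure` tracks how an HTLC received over a blinded path should be failed
+// back; only the introduction-node case is serialized so far. As with the other
+// additions here, odd TLV type numbers let older versions skip fields they do not
+// understand.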
+impl_writeable_tlv_based_enum!(BlindedFailure,
+ (0, FromIntroductionNode) => {}, ;
+);
+
impl_writeable_tlv_based!(HTLCPreviousHopData, {
(0, short_channel_id, required),
(1, phantom_shared_secret, option),
(2, outpoint, required),
+ (3, blinded_failure, option),
(4, htlc_id, required),
(6, incoming_packet_shared_secret, required),
(7, user_channel_id, option),
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
/// this struct.
///
/// This is not exported to bindings users because we have no HashMap bindings
- pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>,
+ pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
}
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
/// HashMap for you. This is primarily useful for C bindings where it is not practical to
/// populate a HashMap directly from C.
pub fn new(entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
- mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>) -> Self {
+ mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
Self {
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, L>>)
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, L>)
where
- M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
+ let logger = WithChannelContext::from(&args.logger, &channel.context);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
// But if the channel is behind the monitor, close the channel:
- log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
- log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+ log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+ log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
- log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
&channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
}
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
- log_error!(args.logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
+ log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
&channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
}
if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
- log_error!(args.logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
+ log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
&channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
}
if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
- log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
+ log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
- let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
- if batch_funding_txid.is_some() {
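+					// `force_shutdown` returns a `ShutdownResult`; the monitor update, the
+					// dropped outbound HTLCs, and any unbroadcast batch funding txid are
+					// read from its named fields below.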
+ let mut shutdown_result = channel.context.force_shutdown(true);
+ if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
- if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+ if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id, funding_txo, update
});
}
- failed_htlcs.append(&mut new_failed_htlcs);
+ failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
					// claim update ChannelMonitor updates were persisted prior to persisting
// the ChannelMonitor update for the forward leg, so attempting to fail the
// backwards leg of the HTLC will simply be rejected.
- log_info!(args.logger,
+ log_info!(logger,
"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
						&payment_hash, &channel.context.channel_id());
failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
}
} else {
- log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+ log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
monitor.get_latest_update_id());
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
}, None));
} else {
- log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
- log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
- log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
- log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
- log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
+ log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+ log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::InvalidValue);
}
}
- for (funding_txo, _) in args.channel_monitors.iter() {
+ for (funding_txo, monitor) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
- log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
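+				// No `Channel` was deserialized for this monitor, so derive the logging
+				// context (channel id, counterparty) from the `ChannelMonitor` itself.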
+ let logger = WithChannelMonitor::from(&args.logger, monitor);
+ log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
&funding_txo.to_channel_id());
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
let mut pending_background_events = Vec::new();
macro_rules! handle_in_flight_updates {
($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
- $monitor: expr, $peer_state: expr, $channel_info_log: expr
+ $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
) => { {
let mut max_in_flight_update_id = 0;
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
for update in $chan_in_flight_upds.iter() {
- log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
+ log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
pending_background_events.push(
});
}
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
- log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
+ log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
return Err(DecodeError::InvalidValue);
}
max_in_flight_update_id
let peer_state = &mut *peer_state_lock;
for phase in peer_state.channel_by_id.values() {
if let ChannelPhase::Funded(chan) = phase {
+ let logger = WithChannelContext::from(&args.logger, &chan.context);
+
// Channels that were persisted have to be funded, otherwise they should have been
// discarded.
let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
max_in_flight_update_id = cmp::max(max_in_flight_update_id,
handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
- funding_txo, monitor, peer_state, ""));
+ funding_txo, monitor, peer_state, logger, ""));
}
}
if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
// If the channel is ahead of the monitor, return InvalidValue:
- log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+ log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+ log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
- log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
- log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
- log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
- log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
- log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+ log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+ log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::InvalidValue);
}
} else {
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
+ let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
// Now that we've removed all the in-flight monitor updates for channels that are
// still open, we need to replay any monitor updates that are for closed channels,
});
let mut peer_state = peer_state_mutex.lock().unwrap();
handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
- funding_txo, monitor, peer_state, "closed ");
+ funding_txo, monitor, peer_state, logger, "closed ");
} else {
- log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(args.logger, " The ChannelMonitor for channel {} is missing.",
+ log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
+ log_error!(logger, " The ChannelMonitor for channel {} is missing.",
&funding_txo.to_channel_id());
- log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
- log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
- log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
- log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+ log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::InvalidValue);
}
}
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+ let chan_id = monitor.get_funding_txo().0.to_channel_id();
if counterparty_opt.is_none() {
+ let logger = WithChannelMonitor::from(&args.logger, monitor);
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
if path.hops.is_empty() {
- log_error!(args.logger, "Got an empty path for a pending payment");
+ log_error!(logger, "Got an empty path for a pending payment");
return Err(DecodeError::InvalidValue);
}
match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
hash_map::Entry::Occupied(mut entry) => {
let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
- log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
- if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), &htlc.payment_hash);
+ log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
+ if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), htlc.payment_hash);
},
hash_map::Entry::Vacant(entry) => {
let path_fee = path.fee_msat();
starting_block_height: best_block_height,
remaining_max_total_routing_fee_msat: None, // only used for retries, and we'll never retry on startup
});
- log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
+ log_info!(logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
path_amt, &htlc.payment_hash, log_bytes!(session_priv_bytes));
}
}
forwards.retain(|forward| {
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
false
} else { true }
});
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
counterparty_node_id: path.hops[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
- path, false, compl_action, &pending_events, &args.logger);
+ path, false, compl_action, &pending_events, &&logger);
pending_events_read = pending_events.into_inner().unwrap();
}
},
let peer_state = &mut *peer_state_lock;
for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
if let ChannelPhase::Funded(chan) = phase {
+ let logger = WithChannelContext::from(&args.logger, &chan.context);
if chan.context.outbound_scid_alias() == 0 {
let mut outbound_scid_alias;
loop {
} else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
						// Note that in rare cases it's possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+ log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
if chan.context.is_usable() {
if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
							// Note that in rare cases it's possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+ log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
- channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+ let logger = WithChannelContext::from(&args.logger, &channel.context);
+ channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
}
}
if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
if let Some(peer_state) = per_peer_state.get(&node_id) {
- for (_, actions) in monitor_update_blocked_actions.iter() {
+ for (channel_id, actions) in monitor_update_blocked_actions.iter() {
+ let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id));
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
} = action {
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
- log_trace!(args.logger,
+ log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
blocked_channel_outpoint.to_channel_id());
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
}
peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
} else {
- log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
+ log_error!(WithContext::from(&args.logger, Some(node_id), None), "Got blocked actions without a per-peer-state for {}", node_id);
return Err(DecodeError::InvalidValue);
}
}
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::ChannelId;
- use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
+ use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{self, ErrorAction};
use crate::ln::msgs::ChannelMessageHandler;
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
- let network_graph = nodes[0].network_graph.clone();
+ let network_graph = nodes[0].network_graph;
let first_hops = nodes[0].node.list_usable_channels();
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
- let network_graph = nodes[0].network_graph.clone();
+ let network_graph = nodes[0].network_graph;
let first_hops = nodes[0].node.list_usable_channels();
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let test_preimage = PaymentPreimage([42; 32]);
let test_secret = PaymentSecret([43; 32]);
- let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
+ let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).to_byte_array());
let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash,
RecipientOnionFields::secret_only(test_secret), PaymentId(payment_hash.0), &route).unwrap();
nodes[0].node.test_send_payment_internal(&route, payment_hash,
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
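+		// NB: `create_channel` now takes an extra `Option` (a caller-provided temporary
+		// channel id, per the updated signature) before the config override; passing
+		// `None` keeps the old behavior.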
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
- let channel_id = ChannelId::from_bytes(tx.txid().into_inner());
+ let channel_id = ChannelId::from_bytes(tx.txid().to_byte_array());
{
			// Ensure that the `id_to_peer` map is empty until either party has received the
			// funding transaction and thus has the real `channel_id`.
let intercept_id = InterceptId([0; 32]);
// Test the API functions.
- check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key);
+ check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);
// Note that create_network connects the nodes together for us
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let mut funding_tx = None;
open_channel_msg.temporary_channel_id);
		// Of course, outbound channels are always allowed
- nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None).unwrap();
+ nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
		// If we fund the first channel, nodes[0] has a live on-chain channel with us; it is now
// Note that create_network connects the nodes together for us
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
open_channel_msg.temporary_channel_id);
// but we can still open an outbound channel.
- nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
// but even with such an outbound channel, additional inbound channels will still fail.
// Note that create_network connects the nodes together for us
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
// First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
};
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
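+		// `create_recv_pending_htlc_info` is now a free function; the current block
+		// height and the node's `accept_mpp_keysend` setting must be passed in explicitly.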
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height();
if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
- node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
- sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat))
+ create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
+ sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
+ current_height, node[0].node.default_configuration.accept_mpp_keysend)
{
assert_eq!(err_code, 19);
} else { panic!(); }
}),
custom_tlvs: Vec::new(),
};
- assert!(node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
- sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok());
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
+ sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
+ current_height, node[0].node.default_configuration.accept_mpp_keysend).is_ok());
}
#[test]
let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
let node = create_network(1, &node_cfg, &node_chanmgr);
- let result = node[0].node.construct_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
amt_msat: 100,
outgoing_cltv_value: 22,
payment_metadata: None,
payment_secret: PaymentSecret([0; 32]), total_msat: 100,
}),
custom_tlvs: Vec::new(),
- }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None);
+ }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height,
+ node[0].node.default_configuration.accept_mpp_keysend);
// Should not return an error as this condition:
// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
&[Some(anchors_cfg.clone()), Some(anchors_cfg.clone()), Some(anchors_manual_accept_cfg.clone())]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
use crate::util::test_utils;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};
+ use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
+ use bitcoin::{Block, Transaction, TxOut};
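+	// rust-bitcoin 0.30: `PackedLockTime` became `absolute::LockTime` and
+	// `Hash::into_inner()` became `to_byte_array()`, hence the import changes above.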
use crate::sync::{Arc, Mutex, RwLock};
node_b.peer_connected(&node_a.get_our_node_id(), &Init {
features: node_a.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
- node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
+ node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
let tx;
if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
- tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+ tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 8_000_000, script_pubkey: output_script,
}]};
node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
let mut payment_preimage = PaymentPreimage([0; 32]);
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;
- let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
$node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),