// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.
-/// Routing info for an inbound HTLC onion.
+/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
- /// A forwarded HTLC.
+ /// An HTLC which should be forwarded on to another node.
Forward {
- /// BOLT 4 onion packet.
+ /// The onion which should be included in the forwarded HTLC, telling the next hop what to
+ /// do with the HTLC.
onion_packet: msgs::OnionPacket,
- /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
- /// generated using `get_fake_scid` from the scid_utils::fake_scid module.
+ /// The short channel ID of the channel which we were instructed to forward this HTLC to.
+ ///
+ /// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
+ /// to the receiving node, such as one returned from
+ /// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
/// Set if this HTLC is being forwarded within a blinded path.
blinded: Option<BlindedForward>,
},
- /// An HTLC paid to an invoice (supposedly) generated by us.
- /// At this point, we have not checked that the invoice being paid was actually generated by us,
- /// but rather it's claiming to pay an invoice of ours.
+ /// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
+ ///
+ /// Note that at this point, we have not checked that the invoice being paid was actually
+ /// generated by us, but rather it's claiming to pay an invoice of ours.
Receive {
- /// Payment secret and total msat received.
+ /// Information about the amount the sender intended to pay and (potential) proof that this
+ /// is a payment for an invoice we generated. This proof of payment is also used for
+ /// linking MPP parts of a larger payment.
payment_data: msgs::FinalOnionHopData,
- /// See [`RecipientOnionFields::payment_metadata`] for more info.
+ /// Additional data which we (allegedly) instructed the sender to include in the onion.
+ ///
+ /// For HTLCs received by LDK, this will ultimately be exposed in
+ /// [`Event::PaymentClaimable::onion_fields`] as
+ /// [`RecipientOnionFields::payment_metadata`].
payment_metadata: Option<Vec<u8>>,
/// CLTV expiry of the received HTLC.
+ ///
/// Used to track when we should expire pending HTLCs that go unclaimed.
incoming_cltv_expiry: u32,
- /// Shared secret derived using a phantom node secret key. If this field is Some, the
- /// payment was sent to a phantom node (one hop beyond the current node), but can be
- /// settled by this node.
+ /// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
+ /// provide the onion shared secret used to decrypt the next level of forwarding
+ /// instructions.
phantom_shared_secret: Option<[u8; 32]>,
- /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+ /// Custom TLVs which were set by the sender.
+ ///
+ /// For HTLCs received by LDK, this will ultimately be exposed in
+ /// [`Event::PaymentClaimable::onion_fields`] as
+ /// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
+ /// Set if this HTLC is the final hop in a multi-hop blinded path.
+ requires_blinded_error: bool,
},
- /// Incoming keysend (sender provided the preimage in a TLV).
+ /// The onion indicates that this is a payment to us which includes the preimage needed to
+ /// claim it, and is unrelated to any invoice we'd previously generated (aka a
+ /// "keysend" or "spontaneous" payment).
ReceiveKeysend {
- /// This was added in 0.0.116 and will break deserialization on downgrades.
+ /// Information about the amount the sender intended to pay and possibly a token to
+ /// associate MPP parts of a larger payment.
+ ///
+ /// This will only be filled in if receiving MPP keysend payments is enabled, and it being
+ /// present will cause deserialization to fail on versions of LDK prior to 0.0.116.
payment_data: Option<msgs::FinalOnionHopData>,
/// Preimage for this onion payment. This preimage is provided by the sender and will be
/// used to settle the spontaneous payment.
payment_preimage: PaymentPreimage,
- /// See [`RecipientOnionFields::payment_metadata`] for more info.
+ /// Additional data which we (allegedly) instructed the sender to include in the onion.
+ ///
+ /// For HTLCs received by LDK, this will ultimately bubble back up as
+ /// [`RecipientOnionFields::payment_metadata`].
payment_metadata: Option<Vec<u8>>,
/// CLTV expiry of the received HTLC.
+ ///
/// Used to track when we should expire pending HTLCs that go unclaimed.
incoming_cltv_expiry: u32,
- /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+ /// Custom TLVs which were set by the sender.
+ ///
+ /// For HTLCs received by LDK, these will ultimately bubble back up as
+ /// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
},
}
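// Illustrative sketch only: a hypothetical helper restating the intent of the three routing
// decisions above. It is not part of the actual forwarding logic (which lives in
// `process_pending_htlc_forwards` and the receive-handling code below); it only summarizes
// what each variant asks us to do with the HTLC.
#[allow(dead_code)]
fn describe_routing(routing: &PendingHTLCRouting) -> &'static str {
	match routing {
		// Relay the HTLC onward using the onion packet and SCID carried in the variant.
		PendingHTLCRouting::Forward { .. } => "forward to the next hop",
		// Claim against an invoice we (allegedly) generated, once the payment data checks out.
		PendingHTLCRouting::Receive { .. } => "receive against one of our invoices",
		// Claim using the sender-provided preimage; no corresponding invoice exists.
		PendingHTLCRouting::ReceiveKeysend { .. } => "receive as a spontaneous (keysend) payment",
	}
}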
/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
-#[derive(Clone, Copy, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
/// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
/// onion payload if we're the introduction node. Useful for calculating the next hop's
impl PendingHTLCRouting {
// Used to override the onion failure code and data if the HTLC is blinded.
fn blinded_failure(&self) -> Option<BlindedFailure> {
- // TODO: needs update when we support receiving to multi-hop blinded paths
- if let Self::Forward { blinded: Some(_), .. } = self {
- Some(BlindedFailure::FromIntroductionNode)
- } else {
- None
+ // TODO: needs update when we support forwarding blinded HTLCs as non-intro node
+ match self {
+ Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode),
+ Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+ _ => None,
}
}
}
-/// Full details of an incoming HTLC, including routing info.
+/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
+/// should go next.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
/// Further routing details based on whether the HTLC is being forwarded or received.
pub routing: PendingHTLCRouting,
- /// Shared secret from the previous hop.
- /// Used encrypt failure packets in the event that the HTLC needs to be failed backwards.
+ /// The onion shared secret we build with the sender, used to decrypt the onion.
+ ///
+ /// This is later used to encrypt failure packets in the event that the HTLC is failed.
pub incoming_shared_secret: [u8; 32],
/// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
pub payment_hash: PaymentHash,
- /// Amount offered by this HTLC.
- pub incoming_amt_msat: Option<u64>, // Added in 0.0.113
- /// Sender intended amount to forward or receive (actual amount received
- /// may overshoot this in either case)
+ /// Amount received in the incoming HTLC.
+ ///
+ /// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
+ /// versions.
+ pub incoming_amt_msat: Option<u64>,
+ /// The amount the sender indicated should be forwarded on to the next hop, or the amount
+ /// the sender intended for us to receive for received payments.
+ ///
+ /// If the received amount is less than this for received payments, an intermediary hop has
+ /// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
+ /// it along another path).
+ ///
+ /// Because nodes can take less than their required fees, and because senders may wish to
+ /// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
+ /// received payments. In such cases, recipients must handle this HTLC as if it had received
+ /// [`Self::outgoing_amt_msat`].
pub outgoing_amt_msat: u64,
- /// Outgoing timelock expiration blockheight.
+ /// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
+ /// should have been set on the received HTLC for received payments).
pub outgoing_cltv_value: u32,
- /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
- /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
+ /// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
+ ///
+ /// If this is a payment for forwarding, this is the fee we are taking before forwarding the
+ /// HTLC.
+ ///
+ /// If this is a received payment, this is the fee that our counterparty took.
+ ///
+ /// This is used to allow LSPs to take fees as a part of payments, without the sender having to
+ /// shoulder them.
pub skimmed_fee_msat: Option<u64>,
}
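// Illustrative sketch only: the acceptance rule described in the docs above for received
// payments, written as a hypothetical standalone check. Real validation happens during onion
// processing; `htlc_amt_msat` here stands in for the amount actually received on the HTLC.
#[allow(dead_code)]
fn received_amount_is_acceptable(info: &PendingHTLCInfo, htlc_amt_msat: u64) -> bool {
	// Anything our counterparty was allowed to skim (see `skimmed_fee_msat`) still counts
	// toward what the sender intended us to receive. If this returns false, an intermediary
	// withheld funds and the HTLC should be failed back; if it returns true, the recipient
	// should still account for the payment as `outgoing_amt_msat`, even if the HTLC
	// slightly over-pays.
	htlc_amt_msat.saturating_add(info.skimmed_fee_msat.unwrap_or(0)) >= info.outgoing_amt_msat
}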
Fail(HTLCFailureMsg),
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
pub(super) forward_info: PendingHTLCInfo,
prev_user_channel_id: u128,
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
AddHTLC(PendingAddHTLCInfo),
FailHTLC {
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
+ FailMalformedHTLC {
+ htlc_id: u64,
+ failure_code: u16,
+ sha256_of_onion: [u8; 32],
+ },
}
// Used for failing blinded HTLCs backwards correctly.
-#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
enum BlindedFailure {
FromIntroductionNode,
- // Another variant will be added here for non-intro nodes.
+ FromBlindedNode,
}
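// Illustrative sketch only: how the two variants above map onto the failure message we
// ultimately send, mirroring the handling in `fail_htlc_backwards_internal` further down. As
// the introduction node we send a normal `update_fail_htlc` carrying an encrypted
// `invalid_onion_blinding` error; as the final blinded node we must send
// `update_fail_malformed_htlc` instead.
#[allow(dead_code)]
fn blinded_failure_message_kind(failure: BlindedFailure) -> &'static str {
	match failure {
		BlindedFailure::FromIntroductionNode => "update_fail_htlc (encrypted invalid_onion_blinding)",
		BlindedFailure::FromBlindedNode => "update_fail_malformed_htlc (invalid_onion_blinding)",
	}
}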
/// Tracks the inbound corresponding to an outbound HTLC
// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
// LATENCY_GRACE_PERIOD_BLOCKS.
-#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
- let shutdown_result;
- loop {
+ let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
+ let mut shutdown_result = None;
+
+ {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let funding_txo_opt = chan.context.get_funding_txo();
let their_features = &peer_state.latest_features;
- let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
+ let (shutdown_msg, mut monitor_update_opt, htlcs) =
chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
- shutdown_result = local_shutdown_result;
- debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
// We can send the `shutdown` message before updating the `ChannelMonitor`
// here as we don't need the monitor update to complete until we send a
if let Some(monitor_update) = monitor_update_opt.take() {
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
- break;
}
-
- if chan.is_shutdown() {
- if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
- }
- self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
- }
- }
- break;
+ } else {
+ self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
+ let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
}
},
hash_map::Entry::Vacant(_) => {
- // If we reach this point, it means that the channel_id either refers to an unfunded channel or
- // it does not exist for this peer. Either way, we can attempt to force-close it.
- //
- // An appropriate error will be returned for non-existence of the channel if that's the case.
- mem::drop(peer_state_lock);
- mem::drop(per_peer_state);
- return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with id {} not found for the passed counterparty node_id {}",
+ channel_id, counterparty_node_id,
+ )
+ });
},
}
}
}
fn decode_update_add_htlc_onion(
- &self, msg: &msgs::UpdateAddHTLC
+ &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
) -> Result<
(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
> {
msg, &self.node_signer, &self.logger, &self.secp_ctx
)?;
- let is_blinded = match next_hop {
+ let is_intro_node_forward = match next_hop {
onion_utils::Hop::Forward {
+ // TODO: update this when we support blinded forwarding as non-intro node
next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
} => true,
- _ => false, // TODO: update this when we support receiving to multi-hop blinded paths
+ _ => false,
};
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
log_info!(
- WithContext::from(&self.logger, None, Some(msg.channel_id)),
+ WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
"Failed to accept/forward incoming HTLC: {}", $msg
);
- let (err_code, err_data) = if is_blinded {
+ // If `msg.blinding_point` is set, we must always fail with malformed.
+ if msg.blinding_point.is_some() {
+ return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }));
+ }
+
+ let (err_code, err_data) = if is_intro_node_forward {
(INVALID_ONION_BLINDING, &[0; 32][..])
} else { ($err_code, $data) };
return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
}
fn construct_pending_htlc_status<'a>(
- &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
- allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+ &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
+ decoded_hop: onion_utils::Hop, allow_underpay: bool,
+ next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
) -> PendingHTLCStatus {
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
- log_info!(WithContext::from(&self.logger, None, Some(msg.channel_id)), "Failed to accept/forward incoming HTLC: {}", $msg);
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+ log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ if msg.blinding_point.is_some() {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ }
return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
/// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
let logger = WithChannelContext::from(&self.logger, &chan.context);
- log_trace!(logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+ log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
let logger = WithChannelContext::from(&self.logger, &chan.context);
- log_trace!(logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+ log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let enabled = chan.context.is_usable() && match chan.channel_update_status() {
} = args;
// The top-level caller should hold the total_consistency_lock read lock.
debug_assert!(self.total_consistency_lock.try_write().is_err());
- log_trace!(WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None),
- "Attempting to send payment with payment hash {} along path with next hop {}",
- payment_hash, path.hops.first().unwrap().short_channel_id);
let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
payment_hash, keysend_preimage, prng_seed
- )?;
+ ).map_err(|e| {
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
+ e
+ })?;
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
- None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
+ None => {
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
+ return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
+ },
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
};
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+ log_trace!(logger,
+ "Attempting to send payment with payment hash {} along path with next hop {}",
+ payment_hash, path.hops.first().unwrap().short_channel_id);
+
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
- Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
+ Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
let logger = WithChannelContext::from(&self.logger, &chan.context);
(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
} else { unreachable!(); });
match funding_res {
- Ok((chan, funding_msg)) => (chan, funding_msg),
+ Ok(funding_msg) => (chan, funding_msg),
Err((chan, err)) => {
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
}
- e.insert(ChannelPhase::Funded(chan));
+ e.insert(ChannelPhase::UnfundedOutboundV1(chan));
}
}
Ok(())
None => {
let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
next_hop_channel_id, next_node_id);
- log_error!(self.logger, "{} when attempting to forward intercepted HTLC", error);
+ let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+ log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
return Err(APIError::ChannelUnavailable {
err: error
})
for (short_chan_id, mut pending_forwards) in forward_htlcs {
if short_chan_id != 0 {
+ let mut forwarding_counterparty = None;
macro_rules! forwarding_channel_not_found {
() => {
for forward_info in pending_forwards.drain(..) {
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+ log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
let next_hop = match onion_utils::decode_next_payment_hop(
phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
- payment_hash, &self.node_signer
+ payment_hash, None, &self.node_signer
) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
}
},
- HTLCForwardInfo::FailHTLC { .. } => {
+ HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
// Channel went away before we could fail it. This implies
// the channel is now on chain and our counterparty is
// trying to broadcast the HTLC-Timeout, but that's their
continue;
}
};
+ forwarding_counterparty = Some(counterparty_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let logger = WithChannelContext::from(&self.logger, &chan.context);
for forward_info in pending_forwards.drain(..) {
- match forward_info {
+ let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
));
continue;
}
+ None
},
HTLCForwardInfo::AddHTLC { .. } => {
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- if let Err(e) = chan.queue_fail_htlc(
- htlc_id, err_packet, &&logger
- ) {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_htlc() were not met");
- }
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- }
+ Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
},
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ let res = chan.queue_fail_malformed_htlc(
+ htlc_id, failure_code, sha256_of_onion, &&logger
+ );
+ Some((res, htlc_id))
+ },
+ };
+ if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+ if let Err(e) = queue_fail_htlc_res {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK, if not, the channel is on
+ // the chain and sending the HTLC-Timeout is their problem.
+ continue;
+ }
}
}
} else {
}) => {
let blinded_failure = routing.blinded_failure();
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
- PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => {
+ PendingHTLCRouting::Receive {
+ payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
+ custom_tlvs, requires_blinded_error: _
+ } => {
let _legacy_hop_data = Some(payment_data.clone());
let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
payment_metadata, custom_tlvs };
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
phantom_shared_secret,
- blinded_failure: None,
+ blinded_failure,
}), payment_hash,
HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
HTLCDestination::FailedPayment { payment_hash: $payment_hash },
},
};
},
- HTLCForwardInfo::FailHTLC { .. } => {
+ HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
}
}
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
- let err_packet = match blinded_failure {
+ let failure = match blinded_failure {
Some(BlindedFailure::FromIntroductionNode) => {
let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
- blinded_onion_error.get_encrypted_failure_packet(
+ let err_packet = blinded_onion_error.get_encrypted_failure_packet(
incoming_packet_shared_secret, phantom_shared_secret
- )
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
+ },
+ Some(BlindedFailure::FromBlindedNode) => {
+ HTLCForwardInfo::FailMalformedHTLC {
+ htlc_id: *htlc_id,
+ failure_code: INVALID_ONION_BLINDING,
+ sha256_of_onion: [0; 32]
+ }
},
None => {
- onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret)
+ let err_packet = onion_error.get_encrypted_failure_packet(
+ incoming_packet_shared_secret, phantom_shared_secret
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
}
};
}
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
+ entry.get_mut().push(failure);
},
hash_map::Entry::Vacant(entry) => {
- entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }));
+ entry.insert(vec!(failure));
}
}
mem::drop(forward_htlcs);
}
if valid_mpp {
for htlc in sources.drain(..) {
+ let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+ let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
+ log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
} else { errs.push((pk, err)); }
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan_phase_entry) => {
- match chan_phase_entry.get_mut() {
- ChannelPhase::Funded(ref mut chan) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
- let monitor = try_chan_phase_entry!(self,
- chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger), chan_phase_entry);
- if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
- handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
- Ok(())
- } else {
- try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
+ hash_map::Entry::Occupied(chan_phase_entry) => {
+ if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
+ let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
+ let logger = WithContext::from(
+ &self.logger,
+ Some(chan.context.get_counterparty_node_id()),
+ Some(chan.context.channel_id())
+ );
+ let res =
+ chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
+ match res {
+ Ok((chan, monitor)) => {
+ if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
+ // We really should be able to insert here without doing a second
+ // lookup, but sadly rust stdlib doesn't currently allow keeping
+ // the original Entry around with the value removed.
+ let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
+ if let ChannelPhase::Funded(ref mut chan) = &mut chan {
+ handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
+ } else { unreachable!(); }
+ Ok(())
+ } else {
+ let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+ return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
+ }
+ },
+ Err((chan, e)) => {
+ debug_assert!(matches!(e, ChannelError::Close(_)),
+ "We don't have a channel anymore, so the error better have expected close");
+ // We've already removed this outbound channel from the map in
+ // `PeerState` above so at this point we just need to clean up any
+ // lingering entries concerning this channel as it is safe to do so.
+ return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
}
- },
- _ => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
- },
+ }
+ } else {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
// closing a channel), so any changes are likely to be lost on restart!
- let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
+ let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let pending_forward_info = match decoded_hop_res {
Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
- self.construct_pending_htlc_status(msg, shared_secret, next_hop,
- chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+ self.construct_pending_htlc_status(
+ msg, counterparty_node_id, shared_secret, next_hop,
+ chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
+ ),
Err(e) => PendingHTLCStatus::Fail(e)
};
let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ if msg.blinding_point.is_some() {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ }
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
},
hash_map::Entry::Vacant(_) => {
log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
- log_bytes!(msg.channel_id.0));
+ msg.channel_id);
// Unfortunately, lnd doesn't force close on errors
// (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
// One of the few ways to get an lnd counterparty to force close is by
let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| {
let node_id = phase.context().get_counterparty_node_id();
- if let ChannelPhase::Funded(chan) = phase {
- let msgs = chan.signer_maybe_unblocked(&self.logger);
- if let Some(updates) = msgs.commitment_update {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id,
- updates,
- });
- }
- if let Some(msg) = msgs.funding_signed {
- pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
- node_id,
- msg,
- });
- }
- if let Some(msg) = msgs.funding_created {
- pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
- node_id,
- msg,
- });
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ let msgs = chan.signer_maybe_unblocked(&self.logger);
+ if let Some(updates) = msgs.commitment_update {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id,
+ updates,
+ });
+ }
+ if let Some(msg) = msgs.funding_signed {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+ node_id,
+ msg,
+ });
+ }
+ if let Some(msg) = msgs.channel_ready {
+ send_channel_ready!(self, pending_msg_events, chan, msg);
+ }
}
- if let Some(msg) = msgs.channel_ready {
- send_channel_ready!(self, pending_msg_events, chan, msg);
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+ node_id,
+ msg,
+ });
+ }
}
+ ChannelPhase::UnfundedInboundV1(_) => {},
}
};
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(5, custom_tlvs, optional_vec),
+ (7, requires_blinded_error, (default_value, false)),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
);
impl_writeable_tlv_based_enum!(BlindedFailure,
- (0, FromIntroductionNode) => {}, ;
+ (0, FromIntroductionNode) => {},
+ (2, FromBlindedNode) => {}, ;
);
impl_writeable_tlv_based!(HTLCPreviousHopData, {
(6, prev_funding_outpoint, required),
});
-impl_writeable_tlv_based_enum!(HTLCForwardInfo,
- (1, FailHTLC) => {
- (0, htlc_id, required),
- (2, err_packet, required),
- };
- (0, AddHTLC)
-);
+impl Writeable for HTLCForwardInfo {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ const FAIL_HTLC_VARIANT_ID: u8 = 1;
+ match self {
+ Self::AddHTLC(info) => {
+ 0u8.write(w)?;
+ info.write(w)?;
+ },
+ Self::FailHTLC { htlc_id, err_packet } => {
+ FAIL_HTLC_VARIANT_ID.write(w)?;
+ write_tlv_fields!(w, {
+ (0, htlc_id, required),
+ (2, err_packet, required),
+ });
+ },
+ Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ // Since this variant was added in 0.0.119, write this as `::FailHTLC` with an empty error
+ // packet so older versions have something to fail back with, but serialize the real data as
+ // optional TLVs for the benefit of newer versions.
+ FAIL_HTLC_VARIANT_ID.write(w)?;
+ let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+ write_tlv_fields!(w, {
+ (0, htlc_id, required),
+ (1, failure_code, required),
+ (2, dummy_err_packet, required),
+ (3, sha256_of_onion, required),
+ });
+ },
+ }
+ Ok(())
+ }
+}
+
+impl Readable for HTLCForwardInfo {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(r)?;
+ Ok(match id {
+ 0 => Self::AddHTLC(Readable::read(r)?),
+ 1 => {
+ _init_and_read_len_prefixed_tlv_fields!(r, {
+ (0, htlc_id, required),
+ (1, malformed_htlc_failure_code, option),
+ (2, err_packet, required),
+ (3, sha256_of_onion, option),
+ });
+ if let Some(failure_code) = malformed_htlc_failure_code {
+ Self::FailMalformedHTLC {
+ htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+ failure_code,
+ sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
+ }
+ } else {
+ Self::FailHTLC {
+ htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+ err_packet: _init_tlv_based_struct_field!(err_packet, required),
+ }
+ }
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ })
+ }
+}
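// Illustrative sketch only: the downgrade behavior the manual (de)serialization above is
// designed around. A pre-0.0.119 reader only understands TLV types 0 and 2, so the bytes
// written for `FailMalformedHTLC` parse there as a `FailHTLC` with an empty error packet,
// while current versions recover the malformed variant from the odd TLV types 1 and 3. The
// round-trip on the current version is also exercised by `test_malformed_forward_htlcs_ser`
// below.
#[cfg(test)]
#[allow(dead_code)]
fn malformed_htlc_forward_ser_example() {
	let fwd = HTLCForwardInfo::FailMalformedHTLC {
		htlc_id: 42, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
	};
	let bytes = fwd.encode();
	assert_eq!(<HTLCForwardInfo as Readable>::read(&mut &bytes[..]).unwrap(), fwd);
}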
impl_writeable_tlv_based!(PendingInboundPayment, {
(0, payment_secret, required),
log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
- log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
&channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
}
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
- let chan_id = monitor.get_funding_txo().0.to_channel_id();
if counterparty_opt.is_none() {
- let logger = WithContext::from(&args.logger, None, Some(chan_id));
+ let logger = WithChannelMonitor::from(&args.logger, monitor);
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
if path.hops.is_empty() {
hash_map::Entry::Occupied(mut entry) => {
let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
- if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
+ if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), htlc.payment_hash);
},
hash_map::Entry::Vacant(entry) => {
let path_fee = path.fee_msat();
}
}
if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
- let logger = WithChannelMonitor::from(&args.logger, previous_hop_monitor);
- previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &&logger);
+ previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
}
}
pending_events_read.push_back((events::Event::PaymentClaimed {
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::ChannelId;
- use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
+ use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{self, ErrorAction};
use crate::ln::msgs::ChannelMessageHandler;
+ use crate::prelude::*;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
+ use crate::util::ser::Writeable;
use crate::util::test_utils;
use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
use crate::sign::EntropySource;
check_spends!(txn[0], funding_tx);
}
}
+
+ #[test]
+ fn test_malformed_forward_htlcs_ser() {
+ // Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
+ let chanmon_cfg = create_chanmon_cfgs(1);
+ let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+ let persister;
+ let chain_monitor;
+ let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
+ let deserialized_chanmgr;
+ let mut nodes = create_network(1, &node_cfg, &chanmgrs);
+
+ let dummy_failed_htlc = |htlc_id| {
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
+ };
+ let dummy_malformed_htlc = |htlc_id| {
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
+ };
+
+ let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+ if htlc_id % 2 == 0 {
+ dummy_failed_htlc(htlc_id)
+ } else {
+ dummy_malformed_htlc(htlc_id)
+ }
+ }).collect();
+
+ let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+ if htlc_id % 2 == 1 {
+ dummy_failed_htlc(htlc_id)
+ } else {
+ dummy_malformed_htlc(htlc_id)
+ }
+ }).collect();
+
+ let (scid_1, scid_2) = (42, 43);
+ let mut forward_htlcs = HashMap::new();
+ forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
+ forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
+
+ let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+ *chanmgr_fwd_htlcs = forward_htlcs.clone();
+ core::mem::drop(chanmgr_fwd_htlcs);
+
+ reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
+
+ let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+ for scid in [scid_1, scid_2].iter() {
+ let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
+ assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
+ }
+ assert!(deserialized_fwd_htlcs.is_empty());
+ core::mem::drop(deserialized_fwd_htlcs);
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ }
}
#[cfg(ldk_bench)]