use core::cell::RefCell;
use crate::io::Read;
use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
use core::time::Duration;
use core::ops::Deref;
},
Receive {
payment_data: msgs::FinalOnionHopData,
+ payment_metadata: Option<Vec<u8>>,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
phantom_shared_secret: Option<[u8; 32]>,
},
ReceiveKeysend {
payment_preimage: PaymentPreimage,
+ payment_metadata: Option<Vec<u8>>,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
},
}
(4, receiver_node_id, required),
});
+struct ClaimablePayment {
+ purpose: events::PaymentPurpose,
+ onion_fields: Option<RecipientOnionFields>,
+ htlcs: Vec<ClaimableHTLC>,
+}
+
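
For context, `ClaimablePayment` replaces the bare `(events::PaymentPurpose, Vec<ClaimableHTLC>)` tuple previously stored per payment hash, adding a slot for the received `RecipientOnionFields` so that the payment secret and metadata can be surfaced on `PaymentClaimable`. A minimal sketch of the shape change, with placeholder element types standing in for the real ones:

    use std::collections::HashMap;

    // Before: an anonymous tuple per payment hash.
    type OldMap = HashMap<[u8; 32], (u8 /* purpose */, Vec<u64> /* htlc amounts */)>;

    // After: a named struct, with room for the new onion fields.
    struct Payment { purpose: u8, onion_fields: Option<Vec<u8>>, htlcs: Vec<u64> }
    type NewMap = HashMap<[u8; 32], Payment>;
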
/// Information about claimable or being-claimed payments
struct ClaimablePayments {
/// Map from payment hash to the payment data and any HTLCs which are to us and can be
/// failed/claimed by the user.
///
/// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
/// we don't get a duplicate payment.
- claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
+ claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
/// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
/// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
/// See `ChannelManager` struct-level documentation for lock order requirements.
pending_events: Mutex<Vec<events::Event>>,
+ /// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
+ pending_events_processor: AtomicBool,
/// See `ChannelManager` struct-level documentation for lock order requirements.
pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// [`OutboundPayments::remove_stale_resolved_payments`].
pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] for which a peer must be
+/// disconnected before we mark the channel as disabled and gossip the update.
+pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
+
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] for which a peer must be
+/// connected before we mark the channel as enabled and gossip the update.
+pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
+
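
Taken together these constants debounce gossip, so a brief disconnect/reconnect never reaches the gossip network at all. A sketch of the staged state machine they feed (the enum and function names here are hypothetical; the real transitions are in the `timer_tick_occurred` hunk below):

    enum Status { Enabled, EnabledStaged(u8), Disabled, DisabledStaged(u8) }

    const DISABLE_TICKS: u8 = 10; // mirrors DISABLE_GOSSIP_TICKS
    const ENABLE_TICKS: u8 = 5; // mirrors ENABLE_GOSSIP_TICKS

    // Returns the next status and whether to broadcast a channel_update now.
    fn on_timer_tick(status: Status, peer_connected: bool) -> (Status, bool) {
        match (status, peer_connected) {
            (Status::Enabled, false) => (Status::DisabledStaged(0), false),
            (Status::DisabledStaged(n), false) if n + 1 >= DISABLE_TICKS => (Status::Disabled, true),
            (Status::DisabledStaged(n), false) => (Status::DisabledStaged(n + 1), false),
            (Status::DisabledStaged(_), true) => (Status::Enabled, false),
            (Status::Disabled, true) => (Status::EnabledStaged(0), false),
            (Status::EnabledStaged(n), true) if n + 1 >= ENABLE_TICKS => (Status::Enabled, true),
            (Status::EnabledStaged(n), true) => (Status::EnabledStaged(n + 1), false),
            (Status::EnabledStaged(_), false) => (Status::Disabled, false),
            (s, _) => (s, false),
        }
    }
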
/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
/// (inbound) ones. The number of peers with unfunded channels is limited separately in
/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we aren't
// already holding it on this thread, as taking it again here would deadlock.
- debug_assert!($self.id_to_peer.try_lock().is_ok());
+ debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
match $update_res {
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
macro_rules! process_events_body {
($self: expr, $event_to_handle: expr, $handle_event: expr) => {
- // We'll acquire our total consistency lock until the returned future completes so that
- // we can be sure no other persists happen while processing events.
- let _read_guard = $self.total_consistency_lock.read().unwrap();
+ let mut processed_all_events = false;
+ while !processed_all_events {
+ if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
+ return;
+ }
- let mut result = NotifyOption::SkipPersist;
+ let mut result = NotifyOption::SkipPersist;
- // TODO: This behavior should be documented. It's unintuitive that we query
- // ChannelMonitors when clearing other events.
- if $self.process_pending_monitor_events() {
- result = NotifyOption::DoPersist;
- }
+ {
+ // We'll acquire our total consistency lock so that we can be sure no other
+ // persists happen while processing monitor events.
+ let _read_guard = $self.total_consistency_lock.read().unwrap();
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if $self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+ }
- let pending_events = mem::replace(&mut *$self.pending_events.lock().unwrap(), vec![]);
- if !pending_events.is_empty() {
- result = NotifyOption::DoPersist;
- }
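+ // We copy the events out of the mutex rather than draining it so that handlers
+ // can enqueue new events while we iterate; only the `num_events` we actually
+ // handled are removed from the front of the queue below.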
+ let pending_events = $self.pending_events.lock().unwrap().clone();
+ let num_events = pending_events.len();
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
- for event in pending_events {
- $event_to_handle = event;
- $handle_event;
- }
+ for event in pending_events {
+ $event_to_handle = event;
+ $handle_event;
+ }
+
+ {
+ let mut pending_events = $self.pending_events.lock().unwrap();
+ pending_events.drain(..num_events);
+ processed_all_events = pending_events.is_empty();
+ $self.pending_events_processor.store(false, Ordering::Release);
+ }
- if result == NotifyOption::DoPersist {
- $self.persistence_notifier.notify();
+ if result == NotifyOption::DoPersist {
+ $self.persistence_notifier.notify();
+ }
}
}
}
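
The guard pattern above, reduced to a standalone sketch: the `compare_exchange(false, true, ..)` admits exactly one caller, later callers return immediately, and the winner keeps looping until no events were queued behind its back (all names below are hypothetical):

    use std::sync::Mutex;
    use std::sync::atomic::{AtomicBool, Ordering};

    struct Processor { busy: AtomicBool, events: Mutex<Vec<u32>> }

    impl Processor {
        fn process(&self, mut handle: impl FnMut(u32)) {
            let mut done = false;
            while !done {
                // Exactly one task may hold the flag; everyone else bails out.
                if self.busy.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
                    return;
                }
                let pending = self.events.lock().unwrap().clone();
                let num = pending.len();
                for ev in pending { handle(ev); }
                let mut events = self.events.lock().unwrap();
                // Drop only what we handled; anything queued meanwhile survives
                // and forces another pass.
                events.drain(..num);
                done = events.is_empty();
                self.busy.store(false, Ordering::Release);
            }
        }
    }
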
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: OutboundPayments::new(),
forward_htlcs: Mutex::new(HashMap::new()),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs: HashMap::new(), pending_claiming_payments: HashMap::new() }),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
pending_intercepted_htlcs: Mutex::new(HashMap::new()),
id_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),
per_peer_state: FairRwLock::new(HashMap::new()),
pending_events: Mutex::new(Vec::new()),
+ pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),
/// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
/// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
///
+ /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be opened due to failing to
+ /// generate a shutdown scriptpubkey or destination script set by
+ /// [`SignerProvider::get_shutdown_scriptpubkey`] or [`SignerProvider::get_destination_script`].
+ ///
/// Note that we do not check if you are currently connected to the given peer. If no
/// connection is available, the outbound `open_channel` message may fail to send, resulting in
/// the channel eventually being silently forgotten (dropped on reload).
///
/// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
+ /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+ /// generate a shutdown scriptpubkey or destination script set by
+ /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+ /// channel.
+ ///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
///
/// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
+ /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+ /// generate a shutdown scriptpubkey or destination script set by
+ /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+ /// channel.
+ ///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
msg: "Got non final data with an HMAC of 0",
});
},
- msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
+ msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => {
if payment_data.is_some() && keysend_preimage.is_some() {
return Err(ReceiveError {
err_code: 0x4000|22,
} else if let Some(data) = payment_data {
PendingHTLCRouting::Receive {
payment_data: data,
+ payment_metadata,
incoming_cltv_expiry: hop_data.outgoing_cltv_value,
phantom_shared_secret,
}
PendingHTLCRouting::ReceiveKeysend {
payment_preimage,
+ payment_metadata,
incoming_cltv_expiry: hop_data.outgoing_cltv_value,
}
} else {
// hopefully an attacker trying to path-trace payments cannot make this occur
// on a small/per-node/per-channel scale.
if !chan.is_live() { // channel_disabled
- break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, chan_update_opt));
+ // If the channel_update we're going to return is disabled (i.e. the
+ // peer has been disabled for some time), return `channel_disabled`,
+ // otherwise return `temporary_channel_failure`.
+ if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+ break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+ } else {
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+ }
}
if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
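
The failure codes in this hunk follow BOLT 4; a sketch of how they decompose, assuming the standard flag bit (these constants are illustrative, not LDK API):

    const UPDATE: u16 = 0x1000; // failure carries an enclosed channel_update
    const TEMPORARY_CHANNEL_FAILURE: u16 = UPDATE | 7;
    const AMOUNT_BELOW_MINIMUM: u16 = UPDATE | 11;
    const CHANNEL_DISABLED: u16 = UPDATE | 20;

The change above therefore downgrades a not-yet-gossiped disconnection from the strong `channel_disabled` signal to `temporary_channel_failure`, reserving `channel_disabled` for channels whose update already carries the disable bit (`flags & 2`).
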
log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+ let enabled = chan.is_usable() && match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled => true,
+ ChannelUpdateStatus::DisabledStaged(_) => true,
+ ChannelUpdateStatus::Disabled => false,
+ ChannelUpdateStatus::EnabledStaged(_) => false,
+ };
+
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.genesis_hash,
short_channel_id,
timestamp: chan.get_update_time_counter(),
- flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
+ flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
cltv_expiry_delta: chan.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
}
+ #[cfg(test)]
+ pub(crate) fn test_set_payment_metadata(&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>) {
+ self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
+ }
+
/// Signals that no further retries for the given payment should occur. Useful if you have a
/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
/// retries are exhausted.
}
}
} else {
- for forward_info in pending_forwards.drain(..) {
+ 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, ..
}
}) => {
- let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
- PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
+ let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
+ PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret } => {
let _legacy_hop_data = Some(payment_data.clone());
- (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
+ let onion_fields =
+ RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), payment_metadata };
+ (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
+ Some(payment_data), phantom_shared_secret, onion_fields)
+ },
+ PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_metadata, incoming_cltv_expiry } => {
+ let onion_fields = RecipientOnionFields { payment_secret: None, payment_metadata };
+ (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
+ None, None, onion_fields)
},
- PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
- (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
_ => {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
}
onion_payload,
};
+ let mut committed_to_claimable = false;
+
macro_rules! fail_htlc {
($htlc: expr, $payment_hash: expr) => {
+ debug_assert!(!committed_to_claimable);
let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(
&self.best_block.read().unwrap().height().to_be_bytes(),
HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
HTLCDestination::FailedPayment { payment_hash: $payment_hash },
));
+ continue 'next_forwardable_htlc;
}
}
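
The new `committed_to_claimable` flag guards an ordering invariant: once an HTLC's entry exists in `claimable_payments`, failing it backwards would leave the map inconsistent, so every `fail_htlc!` must run before that point, and the macro now asserts as much. A reduced model of the pattern, with hypothetical names:

    use std::collections::HashMap;

    // Parts may only be rejected while `committed` is still false.
    fn add_part(map: &mut HashMap<u64, (u64, Vec<u64>)>, hash: u64, total: u64, amt: u64) -> bool {
        let mut committed = false;
        let entry = map.entry(hash).or_insert_with(|| {
            // Creating the entry commits us: past this point we must not fail the part.
            committed = true;
            (total, Vec::new())
        });
        if entry.0 != total {
            // A total mismatch implies the entry pre-existed, so we cannot have
            // committed on this call and may still fail the part backwards.
            debug_assert!(!committed);
            return false;
        }
        entry.1.push(amt);
        true
    }
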
let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
let mut claimable_payments = self.claimable_payments.lock().unwrap();
if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
fail_htlc!(claimable_htlc, payment_hash);
- continue
}
- let (_, ref mut htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
- .or_insert_with(|| (purpose(), Vec::new()));
+ let ref mut claimable_payment = claimable_payments.claimable_payments
+ .entry(payment_hash)
+ // Note that if we insert here we MUST NOT fail_htlc!()
+ .or_insert_with(|| {
+ committed_to_claimable = true;
+ ClaimablePayment {
+ purpose: purpose(), htlcs: Vec::new(), onion_fields: None,
+ }
+ });
+ if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
+ if earlier_fields.check_merge(&mut onion_fields).is_err() {
+ fail_htlc!(claimable_htlc, payment_hash);
+ }
+ } else {
+ claimable_payment.onion_fields = Some(onion_fields);
+ }
+ let ref mut htlcs = &mut claimable_payment.htlcs;
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
- continue
}
}
let mut total_value = claimable_htlc.sender_intended_value;
log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
} else if total_value >= $payment_data.total_msat {
+ #[allow(unused_assignments)] {
+ committed_to_claimable = true;
+ }
let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
+ onion_fields: claimable_payment.onion_fields.clone(),
});
payment_claimable_generated = true;
} else {
// payment value yet, wait until we receive more
// MPP parts.
htlcs.push(claimable_htlc);
+ #[allow(unused_assignments)] {
+ committed_to_claimable = true;
+ }
}
payment_claimable_generated
}}
Err(()) => {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
- continue
}
};
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
log_bytes!(payment_hash.0), cltv_expiry, expected_min_expiry_height);
fail_htlc!(claimable_htlc, payment_hash);
- continue;
}
}
check_total_value!(payment_data, payment_preimage);
let mut claimable_payments = self.claimable_payments.lock().unwrap();
if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
fail_htlc!(claimable_htlc, payment_hash);
- continue
}
- match claimable_payments.claimable_htlcs.entry(payment_hash) {
+ match claimable_payments.claimable_payments.entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
let amount_msat = claimable_htlc.value;
claimable_htlc.total_value_received = Some(amount_msat);
let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
- e.insert((purpose.clone(), vec![claimable_htlc]));
+ e.insert(ClaimablePayment {
+ purpose: purpose.clone(),
+ onion_fields: Some(onion_fields.clone()),
+ htlcs: vec![claimable_htlc],
+ });
let prev_channel_id = prev_funding_outpoint.to_channel_id();
new_events.push(events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
claim_deadline,
+ onion_fields: Some(onion_fields),
});
},
hash_map::Entry::Occupied(_) => {
if payment_data.is_none() {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
- continue
};
let payment_data = payment_data.unwrap();
if inbound_payment.get().payment_secret != payment_data.payment_secret {
}
match chan.channel_update_status() {
- ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
- ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
- ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
- ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
- ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
+ ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
+ ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
+ ChannelUpdateStatus::DisabledStaged(_) if chan.is_live()
+ => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged(_) if !chan.is_live()
+ => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged(mut n) if !chan.is_live() => {
+ n += 1;
+ if n >= DISABLE_GOSSIP_TICKS {
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ } else {
+ chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
}
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
},
- ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
+ ChannelUpdateStatus::EnabledStaged(mut n) if chan.is_live() => {
+ n += 1;
+ if n >= ENABLE_GOSSIP_TICKS {
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ } else {
+ chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
}
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
},
_ => {},
}
}
}
- self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- if htlcs.is_empty() {
+ self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
+ if payment.htlcs.is_empty() {
// This should be unreachable
debug_assert!(false);
return false;
}
- if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+ if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
// In this case we're not going to handle any timeouts of the parts here.
// This condition determining whether the MPP is complete here must match
// exactly the condition used in `process_pending_htlc_forwards`.
- if htlcs[0].total_msat <= htlcs.iter().fold(0, |total, htlc| total + htlc.sender_intended_value) {
+ if payment.htlcs[0].total_msat <= payment.htlcs.iter()
+ .fold(0, |total, htlc| total + htlc.sender_intended_value)
+ {
return true;
- } else if htlcs.into_iter().any(|htlc| {
+ } else if payment.htlcs.iter_mut().any(|htlc| {
htlc.timer_ticks += 1;
return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
}) {
- timed_out_mpp_htlcs.extend(htlcs.drain(..).map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
+ timed_out_mpp_htlcs.extend(payment.htlcs.drain(..)
+ .map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
return false;
}
}
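
The completeness test above (which must match its twin in `process_pending_htlc_forwards` exactly) is just a sum over the parts' sender-intended values; a worked sketch:

    // A payment is complete once its parts cover the sender's advertised total.
    fn mpp_complete(total_msat: u64, part_values_msat: &[u64]) -> bool {
        total_msat <= part_values_msat.iter().sum()
    }

    // With total_msat = 100_000:
    //   [40_000, 40_000]         -> false: keep waiting, keep ticking toward MPP_TIMEOUT_TICKS
    //   [40_000, 40_000, 40_000] -> true: complete (slight overpayment is fine)
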
pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
- if let Some((_, mut sources)) = removed_source {
- for htlc in sources.drain(..) {
+ let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
+ if let Some(payment) = removed_source {
+ for htlc in payment.htlcs {
let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
let mut sources = {
let mut claimable_payments = self.claimable_payments.lock().unwrap();
- if let Some((payment_purpose, sources)) = claimable_payments.claimable_htlcs.remove(&payment_hash) {
+ if let Some(payment) = claimable_payments.claimable_payments.remove(&payment_hash) {
let mut receiver_node_id = self.our_network_pubkey;
- for htlc in sources.iter() {
+ for htlc in payment.htlcs.iter() {
if htlc.prev_hop.phantom_shared_secret.is_some() {
let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
}
let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
- ClaimingPayment { amount_msat: sources.iter().map(|source| source.value).sum(),
- payment_purpose, receiver_node_id,
+ ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
+ payment_purpose: payment.purpose, receiver_node_id,
});
if dup_purpose.is_some() {
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
log_bytes!(payment_hash.0));
}
- sources
+ payment.htlcs
} else { return; }
};
debug_assert!(!sources.is_empty());
}
if let Some(height) = height_opt {
- self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- htlcs.retain(|htlc| {
+ self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
+ payment.htlcs.retain(|htlc| {
// If height is approaching the number of blocks we think it takes us to get
// our commitment transaction confirmed before the HTLC expires, plus the
// number of blocks we generally consider it to take to do a commitment update,
false
} else { true }
});
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
});
let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
(0, payment_data, required),
(1, phantom_shared_secret, option),
(2, incoming_cltv_expiry, required),
+ (3, payment_metadata, option),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
(2, incoming_cltv_expiry, required),
+ (3, payment_metadata, option),
},
;);
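
A note on the numbering above: these TLV types follow the even/odd rule, so the new `payment_metadata` field takes odd type 3 and older readers simply skip it rather than failing to deserialize the variant. The rule, as a sketch (the record type is hypothetical; real parsing lives in LDK's TLV macros):

    struct Record { typ: u64, value: Vec<u8> }

    fn handle_unknown(rec: &Record) -> Result<(), ()> {
        if rec.typ % 2 == 0 {
            Err(()) // unknown even type: required, the reader must fail
        } else {
            Ok(()) // unknown odd type: optional, the reader may skip it
        }
    }
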
let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
- (claimable_payments.claimable_htlcs.len() as u64).write(writer)?;
- for (payment_hash, (purpose, previous_hops)) in claimable_payments.claimable_htlcs.iter() {
+ let mut htlc_onion_fields: Vec<&_> = Vec::new();
+ (claimable_payments.claimable_payments.len() as u64).write(writer)?;
+ for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
payment_hash.write(writer)?;
- (previous_hops.len() as u64).write(writer)?;
- for htlc in previous_hops.iter() {
+ (payment.htlcs.len() as u64).write(writer)?;
+ for htlc in payment.htlcs.iter() {
htlc.write(writer)?;
}
- htlc_purposes.push(purpose);
+ htlc_purposes.push(&payment.purpose);
+ htlc_onion_fields.push(&payment.onion_fields);
}
let mut monitor_update_blocked_actions_per_peer = None;
(7, self.fake_scid_rand_bytes, required),
(9, htlc_purposes, vec_type),
(11, self.probing_cookie_secret, required),
+ (13, htlc_onion_fields, optional_vec),
});
Ok(())
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
+ let mut claimable_htlc_onion_fields = None;
let mut pending_claiming_payments = Some(HashMap::new());
let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
read_tlv_fields!(reader, {
(7, fake_scid_rand_bytes, option),
(9, claimable_htlc_purposes, vec_type),
(11, probing_cookie_secret, option),
+ (13, claimable_htlc_onion_fields, optional_vec),
});
if fake_scid_rand_bytes.is_none() {
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
payment_hash: htlc.payment_hash,
payment_secret: None, // only used for retries, and we'll never retry on startup
+ payment_metadata: None, // only used for retries, and we'll never retry on startup
keysend_preimage: None, // only used for retries, and we'll never retry on startup
pending_amt_msat: path_amt,
pending_fee_msat: Some(path_fee),
let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
- let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
- if let Some(mut purposes) = claimable_htlc_purposes {
+ let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
+ if let Some(purposes) = claimable_htlc_purposes {
if purposes.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
- for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
- claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+ if let Some(onion_fields) = claimable_htlc_onion_fields {
+ if onion_fields.len() != claimable_htlcs_list.len() {
+ return Err(DecodeError::InvalidValue);
+ }
+ for (purpose, (onion, (payment_hash, htlcs))) in
+ purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
+ {
+ let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
+ purpose, htlcs, onion_fields: onion,
+ });
+ if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
+ }
+ } else {
+ for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) {
+ let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
+ purpose, htlcs, onion_fields: None,
+ });
+ if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
+ }
}
} else {
// LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do
// include a `_legacy_hop_data` in the `OnionPayload`.
- for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
- if previous_hops.is_empty() {
+ for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
+ if htlcs.is_empty() {
return Err(DecodeError::InvalidValue);
}
- let purpose = match &previous_hops[0].onion_payload {
+ let purpose = match &htlcs[0].onion_payload {
OnionPayload::Invoice { _legacy_hop_data } => {
if let Some(hop_data) = _legacy_hop_data {
events::PaymentPurpose::InvoicePayment {
OnionPayload::Spontaneous(payment_preimage) =>
events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
};
- claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+ claimable_payments.insert(payment_hash, ClaimablePayment {
+ purpose, htlcs, onion_fields: None,
+ });
}
}
for (_, monitor) in args.channel_monitors.iter() {
for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
- if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
+ if let Some(payment) = claimable_payments.remove(&payment_hash) {
log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
let mut claimable_amt_msat = 0;
let mut receiver_node_id = Some(our_network_pubkey);
- let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
+ let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
if phantom_shared_secret.is_some() {
let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = Some(phantom_pubkey)
}
- for claimable_htlc in claimable_htlcs {
+ for claimable_htlc in payment.htlcs {
claimable_amt_msat += claimable_htlc.value;
// Add a holding-cell claim of the payment to the Channel, which should be
pending_events_read.push(events::Event::PaymentClaimed {
receiver_node_id,
payment_hash,
- purpose: payment_purpose,
+ purpose: payment.purpose,
amount_msat: claimable_amt_msat,
});
}
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs, pending_claiming_payments: pending_claiming_payments.unwrap() }),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
id_to_peer: Mutex::new(id_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
per_peer_state: FairRwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
+ pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
- #[cfg(feature = "std")]
- use core::time::Duration;
use core::sync::atomic::Ordering;
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};