msg: &'static str,
}
-/// Return value for claim_funds_from_hop
-enum ClaimFundsFromHop {
- PrevHopForceClosed,
- MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
- Success(u64),
- DuplicateClaim,
-}
-
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
RevokeAndACKFirst,
}
+/// Information about a payment which is currently being claimed.
+struct ClaimingPayment {
+ amount_msat: u64,
+ payment_purpose: events::PaymentPurpose,
+ receiver_node_id: PublicKey,
+}
+impl_writeable_tlv_based!(ClaimingPayment, {
+ (0, amount_msat, required),
+ (2, payment_purpose, required),
+ (4, receiver_node_id, required),
+});
+
+/// Information about claimable or being-claimed payments
+struct ClaimablePayments {
+ /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+ /// failed/claimed by the user.
+ ///
+	/// Note that no consistency guarantees are made about the channels given here actually
+ /// existing anymore by the time you go to read them!
+ ///
+ /// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
+ /// we don't get a duplicate payment.
+ claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
+
+ /// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
+ /// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
+ /// as an [`events::Event::PaymentClaimed`].
+ pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
+}
+
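
As a rough sketch of how these two maps are meant to interact over a payment's lifetime: the locking is elided and `begin_claim` is a name invented here purely for illustration, not a helper added by this patch.

    // Illustrative only: hand a payment from `claimable_htlcs` to `pending_claiming_payments`.
    fn begin_claim(
        payments: &mut ClaimablePayments, payment_hash: PaymentHash, receiver_node_id: PublicKey,
    ) -> Option<Vec<ClaimableHTLC>> {
        // Remove the HTLCs so no further HTLCs can accumulate under this hash...
        let (purpose, htlcs) = payments.claimable_htlcs.remove(&payment_hash)?;
        // ...and park a summary so a duplicate claim (or a late HTLC) for the same hash is
        // rejected until the claim completes.
        payments.pending_claiming_payments.insert(payment_hash, ClaimingPayment {
            amount_msat: htlcs.iter().map(|htlc| htlc.value).sum(),
            payment_purpose: purpose,
            receiver_node_id, // our node id, or the phantom node id for phantom receives
        });
        Some(htlcs)
    }
    // Once the relevant ChannelMonitorUpdate completes, the ClaimingPayment entry is removed
    // again and surfaced as an events::Event::PaymentClaimed (see
    // handle_monitor_update_completion_actions further down in this patch).
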
// Note this is only exposed in cfg(test):
pub(super) struct ChannelHolder<Signer: Sign> {
pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
}
+pub(crate) enum MonitorUpdateCompletionAction {
+ /// Indicates that a payment ultimately destined for us was claimed and we should emit an
+ /// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
+	/// this payment. Note that this is only best-effort; on restart, a duplicate of such an
+	/// event may be generated.
+ PaymentClaimed { payment_hash: PaymentHash },
+ /// Indicates an [`events::Event`] should be surfaced to the user.
+ EmitEvent { event: events::Event },
+}
+
/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
/// the latest Init features we heard from the peer.
struct PeerState {
// |
// |__`pending_inbound_payments`
// | |
-// | |__`claimable_htlcs`
+// | |__`claimable_payments`
// | |
// | |__`pending_outbound_payments`
// | |
/// See `ChannelManager` struct-level documentation for lock order requirements.
pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
- /// Map from payment hash to the payment data and any HTLCs which are to us and can be
- /// failed/claimed by the user.
- ///
- /// Note that, no consistency guarantees are made about the channels given here actually
- /// existing anymore by the time you go to read them!
+ /// The sets of payments which are claimable or currently being claimed. See
+ /// [`ClaimablePayments`]' individual field docs for more info.
///
/// See `ChannelManager` struct-level documentation for lock order requirements.
- claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+ claimable_payments: Mutex<ClaimablePayments>,
/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
/// and some closed channels which reached a usable state prior to being closed. This is used
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: Mutex::new(HashMap::new()),
forward_htlcs: Mutex::new(HashMap::new()),
- claimable_htlcs: Mutex::new(HashMap::new()),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs: HashMap::new(), pending_claiming_payments: HashMap::new() }),
pending_intercepted_htlcs: Mutex::new(HashMap::new()),
id_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),
/// Signals that no further retries for the given payment will occur.
///
- /// After this method returns, any future calls to [`retry_payment`] for the given `payment_id`
- /// will fail with [`PaymentSendFailure::ParameterError`]. If no such event has been generated,
- /// an [`Event::PaymentFailed`] event will be generated as soon as there are no remaining
- /// pending HTLCs for this payment.
+ /// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
+	/// are allowed. If no [`Event::PaymentFailed`] event has been generated before, one will be
+ /// generated as soon as there are no remaining pending HTLCs for this payment.
///
/// Note that calling this method does *not* prevent a payment from succeeding. You must still
/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
/// determine the ultimate status of a payment.
///
+ /// If an [`Event::PaymentFailed`] event is generated and we restart without this
+ /// [`ChannelManager`] having been persisted, the payment may still be in the pending state
+	/// upon restart. This allows further calls to [`retry_payment`] (and requires a second call
+ /// to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls to
+ /// [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+ ///
+ /// [`abandon_payment`]: Self::abandon_payment
/// [`retry_payment`]: Self::retry_payment
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`Event::PaymentSent`]: events::Event::PaymentSent
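
A minimal sketch of the calling pattern these docs describe, assuming this release's `abandon_payment` signature and `Event::PaymentFailed`/`Event::PaymentSent` variants; `channel_manager`, `payment_id`, and `events` are placeholder bindings supplied by the caller.

    // Illustrative only: stop retrying, then wait for the payment's terminal event.
    channel_manager.abandon_payment(payment_id);
    for event in events {
        match event {
            Event::PaymentFailed { payment_id: id, .. } if id == payment_id => {
                // No pending HTLCs remain; the payment has definitively failed.
            },
            Event::PaymentSent { payment_id: Some(id), .. } if id == payment_id => {
                // An HTLC was fulfilled after all; the payment succeeded despite the abandon.
            },
            _ => {},
        }
    }
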
let mut new_events = Vec::new();
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
- let mut handle_errors = Vec::new();
{
let mut forward_htlcs = HashMap::new();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
continue;
},
hash_map::Entry::Occupied(mut chan) => {
- let mut add_htlc_msgs = Vec::new();
- let mut fail_htlc_msgs = Vec::new();
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
// Phantom payments are only PendingHTLCRouting::Receive.
phantom_shared_secret: None,
});
- match chan.get_mut().send_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
- } else {
- panic!("Stated return value requirements in send_htlc() were not met");
- }
- let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::reason(failure_code, data),
- HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
- ));
- continue;
- },
- Ok(update_add) => {
- match update_add {
- Some(msg) => { add_htlc_msgs.push(msg); },
- None => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can add anymore HTLCs. The Channel
- // will automatically handle building the update_add_htlc and
- // commitment_signed messages when we can.
- // TODO: Do some kind of timer to set the channel as !is_live()
- // as we don't really want others relying on us relaying through
- // this channel currently :/.
- }
- }
+ if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
+ payment_hash, outgoing_cltv_value, htlc_source.clone(),
+ onion_packet, &self.logger)
+ {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
}
+ let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::reason(failure_code, data),
+ HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+ ));
+ continue;
}
},
HTLCForwardInfo::AddHTLC { .. } => {
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in get_update_fail_htlc() were not met");
- }
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- },
- Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
- Ok(None) => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can update the commitment
- // transaction. The Channel will automatically handle
- // building the update_fail_htlc and commitment_signed
- // messages when we can.
- // We don't need any kind of timer here as they should fail
- // the channel onto the chain if they can't get our
- // update_fail_htlc in time, it's not our problem.
+ if let Err(e) = chan.get_mut().queue_fail_htlc(
+ htlc_id, err_packet, &self.logger
+ ) {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in queue_fail_htlc() were not met");
}
+							// fail-backs are best-effort, we probably already have one
+							// pending, and if not that's OK: in that case the channel is on
+							// the chain and sending the HTLC-Timeout is their problem.
+ continue;
}
},
}
}
-
- if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
- let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
- Ok(res) => res,
- Err(e) => {
- // We surely failed send_commitment due to bad keys, in that case
- // close channel and then send error message to peer.
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- let err: Result<(), _> = match e {
- ChannelError::Ignore(_) | ChannelError::Warn(_) => {
- panic!("Stated return value requirements in send_commitment() were not met");
- }
- ChannelError::Close(msg) => {
- log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
- let mut channel = remove_channel!(self, chan);
- // ChannelClosed event is generated by handle_error for us.
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
- },
- };
- handle_errors.push((counterparty_node_id, err));
- continue;
- }
- };
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
- continue;
- }
- }
- log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
- add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: add_htlc_msgs,
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: fail_htlc_msgs,
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed: commitment_msg,
- },
- });
- }
}
}
} else {
payment_secret: $payment_data.payment_secret,
}
};
- let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
- let (_, htlcs) = claimable_htlcs.entry(payment_hash)
+ let mut claimable_payments = self.claimable_payments.lock().unwrap();
+ if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
+ fail_htlc!(claimable_htlc, payment_hash);
+ continue
+ }
+ let (_, htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
.or_insert_with(|| (purpose(), Vec::new()));
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
check_total_value!(payment_data, payment_preimage);
},
OnionPayload::Spontaneous(preimage) => {
- match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
+ let mut claimable_payments = self.claimable_payments.lock().unwrap();
+ if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
+ fail_htlc!(claimable_htlc, payment_hash);
+ continue
+ }
+ match claimable_payments.claimable_htlcs.entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
}
self.forward_htlcs(&mut phantom_receives);
- for (counterparty_node_id, err) in handle_errors.drain(..) {
- let _ = handle_error!(self, err, counterparty_node_id);
- }
+ // Freeing the holding cell here is relatively redundant - in practice we'll do it when we
+ // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's
+ // nice to do the work now if we can rather than while we're trying to get messages in the
+ // network stack.
+ self.check_free_holding_cells();
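
For context on the hunk above: the forwarding loop no longer builds `update_add_htlc`/`update_fail_htlc` messages or a `commitment_signed` itself. A rough mental model of the queue-then-free flow it relies on is sketched below; the internal names are assumptions for illustration, not taken from this patch.

    // queue_add_htlc / queue_fail_htlc are assumed to simply park the HTLC update in the
    // channel's holding cell instead of producing messages right away.
    enum QueuedHtlcUpdate {
        Add { amount_msat: u64 /* plus onion packet, CLTV, HTLC source, ... */ },
        Fail { htlc_id: u64 /* plus the error packet to relay back */ },
    }
    struct HoldingCell { updates: Vec<QueuedHtlcUpdate> }
    // check_free_holding_cells() (also run from get_and_clear_pending_msg_events and the
    // timer) later drains each channel's cell, generating a single CommitmentUpdate and the
    // matching ChannelMonitorUpdate per channel, so commitment/monitor errors are handled in
    // one place rather than inside the forwarding loop.
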
if new_events.is_empty() { return }
let mut events = self.pending_events.lock().unwrap();
self.process_background_events();
}
- fn update_channel_fee(&self, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
- if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
+ fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> NotifyOption {
+ if !chan.is_outbound() { return NotifyOption::SkipPersist; }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
- return (true, NotifyOption::SkipPersist, Ok(()));
+ return NotifyOption::SkipPersist;
}
if !chan.is_live() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
- return (true, NotifyOption::SkipPersist, Ok(()));
+ return NotifyOption::SkipPersist;
}
log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
- let mut retain_channel = true;
- let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
- Ok(res) => Ok(res),
- Err(e) => {
- let (drop, res) = convert_chan_err!(self, e, chan, chan_id);
- if drop { retain_channel = false; }
- Err(res)
- }
- };
- let ret_err = match res {
- Ok(Some((update_fee, commitment_signed, monitor_update))) => {
- match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: Some(update_fee),
- commitment_signed,
- },
- });
- Ok(())
- },
- e => {
- let (res, drop) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
- if drop { retain_channel = false; }
- res
- }
- }
- },
- Ok(None) => Ok(()),
- Err(e) => Err(e),
- };
- (retain_channel, NotifyOption::DoPersist, ret_err)
+ chan.queue_update_fee(new_feerate, &self.logger);
+ NotifyOption::DoPersist
}
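
The skip condition above is easy to misread; restated as a standalone predicate (a sketch with a hypothetical helper name, and ignoring the separate `is_live` check in `update_channel_fee`):

    // An update_fee is only worth queueing if the feerate rose, or fell to half (or less)
    // of the channel's current feerate.
    fn feerate_change_qualifies(current: u32, new: u32) -> bool {
        !(new <= current && new * 2 > current)
    }
    // With current = 1000 sat/kW: new = 600 is skipped (a small decrease), while new = 400
    // (dropped to under half) and new = 1200 (any increase) both qualify.
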
#[cfg(fuzzing)]
let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
- let mut handle_errors = Vec::new();
- {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- channel_state.by_id.retain(|chan_id, chan| {
- let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
- if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
- if err.is_err() {
- handle_errors.push(err);
- }
- retain_channel
- });
+ let mut channel_state = self.channel_state.lock().unwrap();
+ for (chan_id, chan) in channel_state.by_id.iter_mut() {
+ let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
}
should_persist
let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
- let mut handle_errors = Vec::new();
+ let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|chan_id, chan| {
- let counterparty_node_id = chan.get_counterparty_node_id();
- let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
+ let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
- if err.is_err() {
- handle_errors.push((err, counterparty_node_id));
- }
- if !retain_channel { return false; }
if let Err(e) = chan.timer_check_closing_negotiation_progress() {
let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
});
}
- self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
if htlcs.is_empty() {
// This should be unreachable
debug_assert!(false);
self.remove_stale_resolved_payments();
+ // Technically we don't need to do this here, but if we have holding cell entries in a
+ // channel that need freeing, it's better to do that here and block a background task
+ // than block the message queueing pipeline.
+ if self.check_free_holding_cells() {
+ should_persist = NotifyOption::DoPersist;
+ }
+
should_persist
});
}
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
+ let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
/// [`process_pending_events`]: EventsProvider::process_pending_events
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
- /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
- if let Some((payment_purpose, mut sources)) = removed_source {
- assert!(!sources.is_empty());
-
- // If we are claiming an MPP payment, we have to take special care to ensure that each
- // channel exists before claiming all of the payments (inside one lock).
- // Note that channel existance is sufficient as we should always get a monitor update
- // which will take care of the real HTLC claim enforcement.
- //
- // If we find an HTLC which we would need to claim but for which we do not have a
- // channel, we will fail all parts of the MPP payment. While we could wait and see if
- // the sender retries the already-failed path(s), it should be a pretty rare case where
- // we got all the HTLCs and then a channel closed while we were waiting for the user to
- // provide the preimage, so worrying too much about the optimal handling isn't worth
- // it.
- let mut claimable_amt_msat = 0;
- let mut expected_amt_msat = None;
- let mut valid_mpp = true;
- let mut errs = Vec::new();
- let mut claimed_any_htlcs = false;
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let mut receiver_node_id = Some(self.our_network_pubkey);
- for htlc in sources.iter() {
- let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
- Some((_cp_id, chan_id)) => chan_id.clone(),
- None => {
- valid_mpp = false;
+ let mut sources = {
+ let mut claimable_payments = self.claimable_payments.lock().unwrap();
+ if let Some((payment_purpose, sources)) = claimable_payments.claimable_htlcs.remove(&payment_hash) {
+ let mut receiver_node_id = self.our_network_pubkey;
+ for htlc in sources.iter() {
+ if htlc.prev_hop.phantom_shared_secret.is_some() {
+ let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ receiver_node_id = phantom_pubkey;
break;
}
- };
+ }
- if let None = channel_state.by_id.get(&chan_id) {
- valid_mpp = false;
- break;
+ let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
+ ClaimingPayment { amount_msat: sources.iter().map(|source| source.value).sum(),
+ payment_purpose, receiver_node_id,
+ });
+ if dup_purpose.is_some() {
+ debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
+ log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
+ log_bytes!(payment_hash.0));
}
+ sources
+ } else { return; }
+ };
+ debug_assert!(!sources.is_empty());
- if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
- log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
- debug_assert!(false);
+ // If we are claiming an MPP payment, we check that all channels which contain a claimable
+ // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
+ // we're claiming (or even after we claim, before the commitment update dance completes),
+		// it should be a relatively rare race, and we'd rather just reject the payment than claim
+		// HTLCs which would require us to go on-chain (and lose the on-chain fee to do so).
+ //
+ // Note that we'll still always get our funds - as long as the generated
+ // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
+ //
+ // If we find an HTLC which we would need to claim but for which we do not have a
+ // channel, we will fail all parts of the MPP payment. While we could wait and see if
+ // the sender retries the already-failed path(s), it should be a pretty rare case where
+ // we got all the HTLCs and then a channel closed while we were waiting for the user to
+ // provide the preimage, so worrying too much about the optimal handling isn't worth
+ // it.
+ let mut claimable_amt_msat = 0;
+ let mut expected_amt_msat = None;
+ let mut valid_mpp = true;
+ let mut errs = Vec::new();
+ let mut channel_state = Some(self.channel_state.lock().unwrap());
+ for htlc in sources.iter() {
+ let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
+ Some((_cp_id, chan_id)) => chan_id.clone(),
+ None => {
valid_mpp = false;
break;
}
- expected_amt_msat = Some(htlc.total_msat);
- if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
- // We don't currently support MPP for spontaneous payments, so just check
- // that there's one payment here and move on.
- if sources.len() != 1 {
- log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
- debug_assert!(false);
- valid_mpp = false;
- break;
- }
- }
- let phantom_shared_secret = htlc.prev_hop.phantom_shared_secret;
- if phantom_shared_secret.is_some() {
- let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
- .expect("Failed to get node_id for phantom node recipient");
- receiver_node_id = Some(phantom_pubkey)
- }
+ };
- claimable_amt_msat += htlc.value;
- }
- if sources.is_empty() || expected_amt_msat.is_none() {
- log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
- return;
+ if let None = channel_state.as_ref().unwrap().by_id.get(&chan_id) {
+ valid_mpp = false;
+ break;
}
- if claimable_amt_msat != expected_amt_msat.unwrap() {
- log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
- expected_amt_msat.unwrap(), claimable_amt_msat);
- return;
+
+ if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
+ log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
+ debug_assert!(false);
+ valid_mpp = false;
+ break;
}
- if valid_mpp {
- for htlc in sources.drain(..) {
- match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
- ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
- if let msgs::ErrorAction::IgnoreError = err.err.action {
- // We got a temporary failure updating monitor, but will claim the
- // HTLC when the monitor updating is restored (or on chain).
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
- claimed_any_htlcs = true;
- } else { errs.push((pk, err)); }
- },
- ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
- ClaimFundsFromHop::DuplicateClaim => {
- // While we should never get here in most cases, if we do, it likely
- // indicates that the HTLC was timed out some time ago and is no longer
- // available to be claimed. Thus, it does not make sense to set
- // `claimed_any_htlcs`.
- },
- ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
- }
+ expected_amt_msat = Some(htlc.total_msat);
+ if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
+ // We don't currently support MPP for spontaneous payments, so just check
+ // that there's one payment here and move on.
+ if sources.len() != 1 {
+ log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
+ debug_assert!(false);
+ valid_mpp = false;
+ break;
}
}
- mem::drop(channel_state_lock);
- if !valid_mpp {
- for htlc in sources.drain(..) {
- let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
- let source = HTLCSource::PreviousHopData(htlc.prev_hop);
- let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
- let receiver = HTLCDestination::FailedPayment { payment_hash };
- self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
+
+ claimable_amt_msat += htlc.value;
+ }
+ if sources.is_empty() || expected_amt_msat.is_none() {
+ mem::drop(channel_state);
+ self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+ log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
+ return;
+ }
+ if claimable_amt_msat != expected_amt_msat.unwrap() {
+ mem::drop(channel_state);
+ self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+ log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
+ expected_amt_msat.unwrap(), claimable_amt_msat);
+ return;
+ }
+ if valid_mpp {
+ for htlc in sources.drain(..) {
+ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+ if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), htlc.prev_hop,
+ payment_preimage,
+ |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+ {
+ if let msgs::ErrorAction::IgnoreError = err.err.action {
+ // We got a temporary failure updating monitor, but will claim the
+ // HTLC when the monitor updating is restored (or on chain).
+ log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+ } else { errs.push((pk, err)); }
}
}
-
- if claimed_any_htlcs {
- self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
- receiver_node_id,
- payment_hash,
- purpose: payment_purpose,
- amount_msat: claimable_amt_msat,
- });
+ }
+ mem::drop(channel_state);
+ if !valid_mpp {
+ for htlc in sources.drain(..) {
+ let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ let source = HTLCSource::PreviousHopData(htlc.prev_hop);
+ let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
+ let receiver = HTLCDestination::FailedPayment { payment_hash };
+ self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
+ self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+ }
- // Now we can handle any errors which were generated.
- for (counterparty_node_id, err) in errs.drain(..) {
- let res: Result<(), _> = Err(err);
- let _ = handle_error!(self, res, counterparty_node_id);
- }
+ // Now we can handle any errors which were generated.
+ for (counterparty_node_id, err) in errs.drain(..) {
+ let res: Result<(), _> = Err(err);
+ let _ = handle_error!(self, res, counterparty_node_id);
}
}
- fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
+ fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
+ mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
+ prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+ -> Result<(), (PublicKey, MsgHandleErrInternal)> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
let chan_id = prev_hop.outpoint.to_channel_id();
- let channel_state = &mut **channel_state_lock;
+ let channel_state = &mut *channel_state_lock;
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
Ok(msgs_monitor_option) => {
if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
"Failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, e);
- return ClaimFundsFromHop::MonitorUpdateFail(
- chan.get().get_counterparty_node_id(),
- handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
- Some(htlc_value_msat)
- );
+ let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
+ mem::drop(channel_state_lock);
+ self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+ return Err((counterparty_node_id, err));
}
}
if let Some((msg, commitment_signed)) = msgs {
}
});
}
- return ClaimFundsFromHop::Success(htlc_value_msat);
+ mem::drop(channel_state_lock);
+ self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+ Ok(())
} else {
- return ClaimFundsFromHop::DuplicateClaim;
+ Ok(())
}
},
Err((e, monitor_update)) => {
match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
ChannelMonitorUpdateStatus::Completed => {},
e => {
+ // TODO: This needs to be handled somehow - if we receive a monitor update
+ // with a preimage we *must* somehow manage to propagate it to the upstream
+ // channel, or we must have an ability to receive the same update and try
+ // again on restart.
log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
"Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
payment_preimage, e);
},
}
- let counterparty_node_id = chan.get().get_counterparty_node_id();
let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
if drop {
chan.remove_entry();
}
- return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
+ mem::drop(channel_state_lock);
+ self.handle_monitor_update_completion_actions(completion_action(None));
+ Err((counterparty_node_id, res))
},
}
- } else { return ClaimFundsFromHop::PrevHopForceClosed }
+ } else {
+ let preimage_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage,
+ }],
+ };
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an `update_fulfill_htlc` from the forward link.
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update);
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ // TODO: This needs to be handled somehow - if we receive a monitor update
+ // with a preimage we *must* somehow manage to propagate it to the upstream
+ // channel, or we must have an ability to receive the same event and try
+ // again on restart.
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, update_res);
+ }
+ mem::drop(channel_state_lock);
+ // Note that we do process the completion action here. This totally could be a
+ // duplicate claim, but we have no way of knowing without interrogating the
+ // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+			// generally allowed to be duplicative (and it's specifically noted in
+ // `PaymentForwarded`).
+ self.handle_monitor_update_completion_actions(completion_action(None));
+ Ok(())
+ }
}
fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
}
}
- fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+ fn claim_funds_internal(&self, channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
mem::drop(channel_state_lock);
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
- let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
- let htlc_claim_value_msat = match res {
- ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
- ClaimFundsFromHop::Success(amt) => Some(amt),
- _ => None,
- };
- if let ClaimFundsFromHop::PrevHopForceClosed = res {
- let preimage_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage: payment_preimage.clone(),
- }],
- };
- // We update the ChannelMonitor on the backward link, after
- // receiving an offchain preimage event from the forward link (the
- // event being update_fulfill_htlc).
- let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
- if update_res != ChannelMonitorUpdateStatus::Completed {
- // TODO: This needs to be handled somehow - if we receive a monitor update
- // with a preimage we *must* somehow manage to propagate it to the upstream
- // channel, or we must have an ability to receive the same event and try
- // again on restart.
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, update_res);
- }
- // Note that we do *not* set `claimed_htlc` to false here. In fact, this
- // totally could be a duplicate claim, but we have no way of knowing
- // without interrogating the `ChannelMonitor` we've provided the above
- // update to. Instead, we simply document in `PaymentForwarded` that this
- // can happen.
- }
- mem::drop(channel_state_lock);
- if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+ let res = self.claim_funds_from_hop(channel_state_lock, hop_data, payment_preimage,
+ |htlc_claim_value_msat| {
+ if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+ Some(claimed_htlc_value - forwarded_htlc_value)
+ } else { None };
+
+ let prev_channel_id = Some(prev_outpoint.to_channel_id());
+ let next_channel_id = Some(next_channel_id);
+
+ Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
+ fee_earned_msat,
+ claim_from_onchain_tx: from_onchain,
+ prev_channel_id,
+ next_channel_id,
+ }})
+ } else { None }
+ });
+ if let Err((pk, err)) = res {
let result: Result<(), _> = Err(err);
let _ = handle_error!(self, result, pk);
}
-
- if claimed_htlc {
- if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
- let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
- Some(claimed_htlc_value - forwarded_htlc_value)
- } else { None };
-
- let mut pending_events = self.pending_events.lock().unwrap();
- let prev_channel_id = Some(prev_outpoint.to_channel_id());
- let next_channel_id = Some(next_channel_id);
-
- pending_events.push(events::Event::PaymentForwarded {
- fee_earned_msat,
- claim_from_onchain_tx: from_onchain,
- prev_channel_id,
- next_channel_id,
- });
- }
- }
},
}
}
self.our_network_pubkey.clone()
}
+ fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
+ for action in actions.into_iter() {
+ match action {
+ MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
+ let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+ if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
+ self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+ payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
+ });
+ }
+ },
+ MonitorUpdateCompletionAction::EmitEvent { event } => {
+ self.pending_events.lock().unwrap().push(event);
+ },
+ }
+ }
+ }
+
/// Handles a channel reentering a functional state, either due to reconnect or a monitor
/// update completion.
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
/// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
/// update was applied.
- ///
- /// This should only apply to HTLCs which were added to the holding cell because we were
- /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
- /// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
- /// code to inform them of a channel monitor update.
fn check_free_holding_cells(&self) -> bool {
let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
}
if let Some(height) = height_opt {
- self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
htlcs.retain(|htlc| {
// If height is approaching the number of blocks we think it takes us to get
// our commitment transaction confirmed before the HTLC expires, plus the
}
let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
- let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let claimable_payments = self.claimable_payments.lock().unwrap();
let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
- (claimable_htlcs.len() as u64).write(writer)?;
- for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
+ (claimable_payments.claimable_htlcs.len() as u64).write(writer)?;
+ for (payment_hash, (purpose, previous_hops)) in claimable_payments.claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
for htlc in previous_hops.iter() {
if our_pending_intercepts.len() != 0 {
pending_intercepted_htlcs = Some(our_pending_intercepts);
}
+
+ let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
+ if pending_claiming_payments.as_ref().unwrap().is_empty() {
+			// LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
+			// map. Since it is written as an even (required-to-understand) TLV type, writing it
+			// would break downgrades; thus, if there are no entries we skip writing a TLV for it.
+ pending_claiming_payments = None;
+ } else {
+ debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR");
+ }
+
write_tlv_fields!(writer, {
(1, pending_outbound_payments_no_retry, required),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, required),
+ (4, pending_claiming_payments, option),
(5, self.our_network_pubkey, required),
(7, self.fake_scid_rand_bytes, required),
(9, htlc_purposes, vec_type),
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
+ let mut pending_claiming_payments = Some(HashMap::new());
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, option),
+ (4, pending_claiming_payments, option),
(5, received_network_pubkey, option),
(7, fake_scid_rand_bytes, option),
(9, claimable_htlc_purposes, vec_type),
}
for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+ let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+ info.prev_funding_outpoint == prev_hop_data.outpoint &&
+ info.prev_htlc_id == prev_hop_data.htlc_id
+ };
// The ChannelMonitor is now responsible for this HTLC's
// failure/success and will let us know what its outcome is. If we
- // still have an entry for this HTLC in `forward_htlcs`, we were
- // apparently not persisted after the monitor was when forwarding
- // the payment.
+ // still have an entry for this HTLC in `forward_htlcs` or
+ // `pending_intercepted_htlcs`, we were apparently not persisted after
+ // the monitor was when forwarding the payment.
forward_htlcs.retain(|_, forwards| {
forwards.retain(|forward| {
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
- if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
- htlc_info.prev_htlc_id == prev_hop_data.htlc_id
- {
+ if pending_forward_matches_htlc(&htlc_info) {
log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
false
} else { true }
});
!forwards.is_empty()
- })
+ });
+ pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ pending_events_read.retain(|event| {
+ if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+ intercepted_id != ev_id
+ } else { true }
+ });
+ false
+ } else { true }
+ });
}
}
}
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
- claimable_htlcs: Mutex::new(claimable_htlcs),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
id_to_peer: Mutex::new(id_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),