use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::events;
-use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
}
}
}
-#[cfg(not(feature = "grind_signatures"))]
-#[cfg(test)]
impl HTLCSource {
+ #[cfg(not(feature = "grind_signatures"))]
+ #[cfg(test)]
pub fn dummy() -> Self {
HTLCSource::OutboundRoute {
path: Vec::new(),
session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
first_hop_htlc_msat: 0,
payment_id: PaymentId([2; 32]),
}
}
+
+ #[cfg(debug_assertions)]
+ /// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
+ /// transaction. Useful to ensure different data structures match up.
+ pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
+ if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
+ *first_hop_htlc_msat == htlc.amount_msat
+ } else {
+ // There's nothing we can check for forwarded HTLCs
+ true
+ }
+ }
}
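// Illustration only (not part of this change): a hypothetical debug-time cross-check
// built on the helper above. `HTLCOutputInCommitment` and `possibly_matches_output`
// are from the code above; the wrapper function itself is invented for the example.
#[cfg(debug_assertions)]
fn debug_assert_source_matches(source: &HTLCSource, htlc: &crate::ln::chan_utils::HTLCOutputInCommitment) {
	// Vacuously true for forwarded HTLCs; for locally-originated HTLCs this compares
	// the recorded first-hop amount against the commitment output's amount.
	debug_assert!(source.possibly_matches_output(htlc));
}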
struct ReceiveError {
/// This is not exported to bindings users as Arcs don't make sense in bindings
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+/// A trivial trait which describes any [`ChannelManager`] used in testing.
+#[cfg(any(test, feature = "_test_utils"))]
+pub trait AChannelManager {
+ type Watch: chain::Watch<Self::Signer>;
+ type M: Deref<Target = Self::Watch>;
+ type Broadcaster: BroadcasterInterface;
+ type T: Deref<Target = Self::Broadcaster>;
+ type EntropySource: EntropySource;
+ type ES: Deref<Target = Self::EntropySource>;
+ type NodeSigner: NodeSigner;
+ type NS: Deref<Target = Self::NodeSigner>;
+ type Signer: WriteableEcdsaChannelSigner;
+ type SignerProvider: SignerProvider<Signer = Self::Signer>;
+ type SP: Deref<Target = Self::SignerProvider>;
+ type FeeEstimator: FeeEstimator;
+ type F: Deref<Target = Self::FeeEstimator>;
+ type Router: Router;
+ type R: Deref<Target = Self::Router>;
+ type Logger: Logger;
+ type L: Deref<Target = Self::Logger>;
+ fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::L>;
+}
+#[cfg(any(test, feature = "_test_utils"))]
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
+for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer> + Sized,
+ T::Target: BroadcasterInterface + Sized,
+ ES::Target: EntropySource + Sized,
+ NS::Target: NodeSigner + Sized,
+ SP::Target: SignerProvider + Sized,
+ F::Target: FeeEstimator + Sized,
+ R::Target: Router + Sized,
+ L::Target: Logger + Sized,
+{
+ type Watch = M::Target;
+ type M = M;
+ type Broadcaster = T::Target;
+ type T = T;
+ type EntropySource = ES::Target;
+ type ES = ES;
+ type NodeSigner = NS::Target;
+ type NS = NS;
+ type Signer = <SP::Target as SignerProvider>::Signer;
+ type SignerProvider = SP::Target;
+ type SP = SP;
+ type FeeEstimator = F::Target;
+ type F = F;
+ type Router = R::Target;
+ type R = R;
+ type Logger = L::Target;
+ type L = L;
+ fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
+}
+
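// Illustration only (not part of this change): the `AChannelManager` trait above lets
// test utilities be written generically over any concrete `ChannelManager`
// instantiation. A hypothetical helper:
#[cfg(any(test, feature = "_test_utils"))]
fn count_usable_channels<CM: AChannelManager>(cm: &CM) -> usize {
	// `get_cm()` recovers the concrete manager behind the associated types, so this
	// helper compiles once and works for every manager type used in tests.
	cm.get_cm().list_usable_channels().len()
}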
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
}
macro_rules! handle_error {
- ($self: ident, $internal: expr, $counterparty_node_id: expr) => {
+ ($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
+ // In testing, ensure there are no deadlocks where the lock is already held upon
+ // entering the macro.
+ debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
- // In testing, ensure there are no deadlocks where the lock is already held upon
- // entering the macro.
- debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
- debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
-
let mut msg_events = Vec::with_capacity(2);
if let Some((shutdown_res, update_option)) = shutdown_finish {
Err(err)
},
}
- }
+ } }
}
macro_rules! handle_new_monitor_update {
($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
// update_maps_on_chan_removal needs to be able to take the id_to_peer lock, so make
// sure we can take it here in any case, as otherwise we'd deadlock.
- debug_assert!($self.id_to_peer.try_lock().is_ok());
+ debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
match $update_res {
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
}
}
+macro_rules! process_events_body {
+ ($self: expr, $event_to_handle: expr, $handle_event: expr) => {
+ // We'll hold our total consistency lock while processing events (and, for the async
+ // caller, until the returned future completes) so that we can be sure no other
+ // persists happen in the meantime.
+ let _read_guard = $self.total_consistency_lock.read().unwrap();
+
+ let mut result = NotifyOption::SkipPersist;
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if $self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let pending_events = mem::replace(&mut *$self.pending_events.lock().unwrap(), vec![]);
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ for event in pending_events {
+ $event_to_handle = event;
+ $handle_event;
+ }
+
+ if result == NotifyOption::DoPersist {
+ $self.persistence_notifier.notify();
+ }
+ }
+}
+
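// Illustration only (not part of this change): a minimal sketch of how the macro above
// is meant to be invoked. `$event_to_handle` is a place-expression the macro assigns
// each event into before `$handle_event` runs, which is what allows the async caller
// to `.await` inside the body (a closure could not):
//
//   let mut ev;
//   process_events_body!(self, ev, { handler(ev).await });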
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
}
#[cfg(test)]
- pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
let _lck = self.total_consistency_lock.read().unwrap();
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
}
- fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
// The top-level caller should hold the total_consistency_lock read lock.
debug_assert!(self.total_consistency_lock.try_write().is_err());
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
if onion_utils::route_size_insane(&onion_payloads) {
return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
}
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments
- .send_payment_with_route(route, payment_hash, &recipient_onion.payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments
- .send_payment(payment_hash, &recipient_onion.payment_secret, payment_id, retry_strategy, route_params,
+ .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
&self.pending_events,
- |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
- pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
+ pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
- |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
- pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
+ pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, None, &self.entropy_source, best_block_height)
+ self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
}
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn abandon_payment(&self, payment_id: PaymentId) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
+ self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
}
/// Send a spontaneous payment, which is a payment that does not require the recipient to have
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
- route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
- best_block_height,
- |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
+ &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
- retry_strategy, route_params, &self.router, self.list_usable_channels(),
+ self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
+ payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.logger, &self.pending_events,
- |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Send a payment that is probing the given route for liquidity. We calculate the
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, msg) = {
- let (res, chan) = {
- match peer_state.channel_by_id.remove(temporary_channel_id) {
- Some(mut chan) => {
- let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
- (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
- .map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
- } else { unreachable!(); })
- , chan)
+ let (msg, chan) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+ Some(mut chan) => {
+ let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+ let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ .map_err(|e| if let ChannelError::Close(msg) = e {
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+ } else { unreachable!(); });
+ match funding_res {
+ Ok(funding_msg) => (funding_msg, chan),
+ Err(_) => {
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+
+ let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
+ return Err(APIError::ChannelUnavailable {
+ err: "Signer refused to sign the initial commitment transaction".to_owned()
+ });
},
- None => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }) },
}
- };
- match handle_error!(self, res, chan.get_counterparty_node_id()) {
- Ok(funding_msg) => {
- (chan, funding_msg)
- },
- Err(_) => { return Err(APIError::ChannelUnavailable {
- err: "Signer refused to sign the initial commitment transaction".to_owned()
- }) },
- }
+ },
+ None => {
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with id {} not found for the passed counterparty node_id {}",
+ log_bytes!(*temporary_channel_id), counterparty_node_id),
+ })
+ },
};
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
}
}
let mut total_value = claimable_htlc.sender_intended_value;
+ let mut earliest_expiry = claimable_htlc.cltv_expiry;
for htlc in htlcs.iter() {
total_value += htlc.sender_intended_value;
+ earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
match &htlc.onion_payload {
OnionPayload::Invoice { .. } => {
if htlc.total_msat != $payment_data.total_msat {
amount_msat,
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
+ claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
});
payment_claimable_generated = true;
} else {
hash_map::Entry::Vacant(e) => {
let amount_msat = claimable_htlc.value;
claimable_htlc.total_value_received = Some(amount_msat);
+ let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
let prev_channel_id = prev_funding_outpoint.to_channel_id();
purpose,
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
+ claim_deadline,
});
},
hash_map::Entry::Occupied(_) => {
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, &self.logger,
- |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv));
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
/// [`MessageSendEvent`]s needed to claim the payment.
///
- /// Note that calling this method does *not* guarantee that the payment has been claimed. You
- /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
- /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+ /// This method is guaranteed to succeed in claiming the payment, but only if the current
+ /// block height is strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race
+ /// conditions, you should still wait for an [`Event::PaymentClaimed`] event before considering
+ /// the payment successful; it will generally be available in the next
+ /// [`process_pending_events`] call.
///
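/// A minimal sketch of a claiming event handler (assuming a `channel_manager` binding is
/// in scope; field lists abbreviated):
///
/// ```ignore
/// match event {
///     Event::PaymentClaimable { purpose, .. } => {
///         if let PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), .. } = purpose {
///             channel_manager.claim_funds(preimage);
///         }
///     },
///     // Only once this event fires is the payment actually locked in.
///     Event::PaymentClaimed { .. } => {},
///     _ => {},
/// }
/// ```
///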
/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
+ /// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
/// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
/// [`process_pending_events`]: EventsProvider::process_pending_events
/// [`create_inbound_payment`]: Self::create_inbound_payment
};
debug_assert!(!sources.is_empty());
- // If we are claiming an MPP payment, we check that all channels which contain a claimable
- // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
- // we're claiming (or even after we claim, before the commitment update dance completes),
- // it should be a relatively rare race, and we'd rather not claim HTLCs that require us to
- // go on-chain (and lose the on-chain fee to do so) than just reject the payment.
- //
- // Note that we'll still always get our funds - as long as the generated
- // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
- //
- // If we find an HTLC which we would need to claim but for which we do not have a
- // channel, we will fail all parts of the MPP payment. While we could wait and see if
- // the sender retries the already-failed path(s), it should be a pretty rare case where
- // we got all the HTLCs and then a channel closed while we were waiting for the user to
- // provide the preimage, so worrying too much about the optimal handling isn't worth
- // it.
+ // Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
+ // and when we got here, we need to check that the amount we're about to claim matches the
+ // amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
+ // the MPP parts all have the same `total_msat`.
let mut claimable_amt_msat = 0;
let mut prev_total_msat = None;
let mut expected_amt_msat = None;
let mut errs = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for htlc in sources.iter() {
- let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
- Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
- None => {
- valid_mpp = false;
- break;
- }
- };
-
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- valid_mpp = false;
- break;
- }
-
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
-
- if peer_state.channel_by_id.get(&chan_id).is_none() {
- valid_mpp = false;
- break;
- }
-
if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
debug_assert!(false);
-> Result<(), (PublicKey, MsgHandleErrInternal)> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
- let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
- let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
- Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
- None => None
- };
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let chan_id = prev_hop.outpoint.to_channel_id();
+ let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
+ Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
+ None => None
+ };
- let peer_state_opt = counterparty_node_id_opt.as_ref().map(
- |counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
- |peer_mutex| peer_mutex.lock().unwrap()
- )
- ).unwrap_or(None);
+ let peer_state_opt = counterparty_node_id_opt.as_ref().map(
+ |counterparty_node_id| per_peer_state.get(counterparty_node_id)
+ .map(|peer_mutex| peer_mutex.lock().unwrap())
+ ).unwrap_or(None);
- if peer_state_opt.is_some() {
- let mut peer_state_lock = peer_state_opt.unwrap();
- let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
- if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
- if let Some(action) = completion_action(Some(htlc_value_msat)) {
- log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
- log_bytes!(chan_id), action);
- peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
- }
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
- let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, per_peer_state, chan);
- if let Err(e) = res {
- // TODO: This is a *critical* error - we probably updated the outbound edge
- // of the HTLC's monitor with a preimage. We should retry this monitor
- // update over and over again until morale improves.
- log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
- return Err((counterparty_node_id, e));
+ if peer_state_opt.is_some() {
+ let mut peer_state_lock = peer_state_opt.unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+ if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+ if let Some(action) = completion_action(Some(htlc_value_msat)) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ log_bytes!(chan_id), action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ }
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+ let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan);
+ if let Err(e) = res {
+ // TODO: This is a *critical* error - we probably updated the outbound edge
+ // of the HTLC's monitor with a preimage. We should retry this monitor
+ // update over and over again until morale improves.
+ log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+ return Err((counterparty_node_id, e));
+ }
}
+ return Ok(());
}
- return Ok(());
}
}
let preimage_update = ChannelMonitorUpdate {
pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
&self, handler: H
) {
- // We'll acquire our total consistency lock until the returned future completes so that
- // we can be sure no other persists happen while processing events.
- let _read_guard = self.total_consistency_lock.read().unwrap();
-
- let mut result = NotifyOption::SkipPersist;
-
- // TODO: This behavior should be documented. It's unintuitive that we query
- // ChannelMonitors when clearing other events.
- if self.process_pending_monitor_events() {
- result = NotifyOption::DoPersist;
- }
-
- let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
- if !pending_events.is_empty() {
- result = NotifyOption::DoPersist;
- }
-
- for event in pending_events {
- handler(event).await;
- }
-
- if result == NotifyOption::DoPersist {
- self.persistence_notifier.notify();
- }
+ let mut ev;
+ process_events_body!(self, ev, { handler(ev).await });
}
}
/// An [`EventHandler`] may safely call back to the provider in order to handle an event.
/// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut result = NotifyOption::SkipPersist;
-
- // TODO: This behavior should be documented. It's unintuitive that we query
- // ChannelMonitors when clearing other events.
- if self.process_pending_monitor_events() {
- result = NotifyOption::DoPersist;
- }
-
- let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
- if !pending_events.is_empty() {
- result = NotifyOption::DoPersist;
- }
-
- for event in pending_events {
- handler.handle_event(event);
- }
-
- result
- });
+ let mut ev;
+ process_events_body!(self, ev, handler.handle_event(ev));
}
}
// Use the utility function send_payment_along_path to send the payment with MPP data which
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
- let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Send the second half of the original MPP payment.
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let test_preimage = PaymentPreimage([42; 32]);
let mismatch_payment_hash = PaymentHash([43; 32]);
- let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap();
- nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
+ RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
+ nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
+ RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
let test_preimage = PaymentPreimage([42; 32]);
let test_secret = PaymentSecret([43; 32]);
let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
- let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap();
- nodes[0].node.test_send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash,
+ RecipientOnionFields::secret_only(test_secret), PaymentId(payment_hash.0), &route).unwrap();
+ nodes[0].node.test_send_payment_internal(&route, payment_hash,
+ RecipientOnionFields::secret_only(test_secret), Some(test_preimage),
+ PaymentId(payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
use test::Bencher;
- struct NodeHolder<'a, P: Persist<InMemorySigner>> {
- node: &'a ChannelManager<
- &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
- &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
- &'a test_utils::TestLogger, &'a P>,
- &'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
- &'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
- &'a test_utils::TestLogger>,
+ type Manager<'a, P> = ChannelManager<
+ &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
+ &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
+ &'a test_utils::TestLogger, &'a P>,
+ &'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
+ &'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
+ &'a test_utils::TestLogger>;
+
+ struct ANodeHolder<'a, P: Persist<InMemorySigner>> {
+ node: &'a Manager<'a, P>,
+ }
+ impl<'a, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'a, P> {
+ type CM = Manager<'a, P>;
+ #[inline]
+ fn node(&self) -> &Manager<'a, P> { self.node }
+ #[inline]
+ fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
}
#[cfg(test)]
network,
best_block: BestBlock::from_network(network),
});
- let node_a_holder = NodeHolder { node: &node_a };
+ let node_a_holder = ANodeHolder { node: &node_a };
let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
network,
best_block: BestBlock::from_network(network),
});
- let node_b_holder = NodeHolder { node: &node_b };
+ let node_b_holder = ANodeHolder { node: &node_b };
node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
- let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
+ let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
$node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
- $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
+ $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
- expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
- expect_payment_claimable!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
+ expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
+ expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
$node_b.claim_funds(payment_preimage);
- expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
+ expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
MessageSendEvent::UpdateHTLCs { node_id, updates } => {
_ => panic!("Failed to generate claim event"),
}
- let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
+ let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
- $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
+ $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
- expect_payment_sent!(NodeHolder { node: &$node_a }, payment_preimage);
+ expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
}
}