//! responsible for tracking which channels are open and which HTLCs are in flight, and for
//! reestablishing those upon reconnect to the relevant peer(s).
//!
-//! It does not manage routing logic (see [`find_route`] for that) nor does it manage constructing
+//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
-//!
-//! [`find_route`]: crate::routing::router::find_route
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
Ok(InterceptId(buf))
}
}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
+pub(crate) enum SentHTLCId {
+ PreviousHopData { short_channel_id: u64, htlc_id: u64 },
+ OutboundRoute { session_priv: SecretKey },
+}
+impl SentHTLCId {
+ pub(crate) fn from_source(source: &HTLCSource) -> Self {
+ match source {
+ HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
+ short_channel_id: hop_data.short_channel_id,
+ htlc_id: hop_data.htlc_id,
+ },
+ HTLCSource::OutboundRoute { session_priv, .. } =>
+ Self::OutboundRoute { session_priv: *session_priv },
+ }
+ }
+}
+impl_writeable_tlv_based_enum!(SentHTLCId,
+ (0, PreviousHopData) => {
+ (0, short_channel_id, required),
+ (2, htlc_id, required),
+ },
+ (2, OutboundRoute) => {
+ (0, session_priv, required),
+ };
+);
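+
+// Usage sketch (illustrative; `htlc_source` and `payment_hash` are assumed bindings): because
+// `SentHTLCId` is small, `Copy`, `Eq` and `Hash`, it can key a map tracking in-flight HTLCs:
+//   let mut in_flight: HashMap<SentHTLCId, PaymentHash> = HashMap::new();
+//   in_flight.insert(SentHTLCId::from_source(&htlc_source), payment_hash);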
+
/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
#[derive(Clone, PartialEq, Eq)]
}
macro_rules! handle_monitor_update_completion {
- ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+ ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let mut updates = $chan.monitor_updating_restored(&$self.logger,
&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height());
let channel_id = $chan.channel_id();
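+		// Any completion actions handled below may need to take per-peer state locks of their
+		// own, so release both this peer's lock and the top-level per_peer_state lock first.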
core::mem::drop($peer_state_lock);
+ core::mem::drop($per_peer_state_lock);
$self.handle_monitor_update_completion_actions(update_actions);
}
macro_rules! handle_new_monitor_update {
- ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+ ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
// any case so that it won't deadlock.
debug_assert!($self.id_to_peer.try_lock().is_ok());
.update_id == $update_id) &&
$chan.get_latest_monitor_update_id() == $update_id
{
- handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
+ handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
}
Ok(())
},
}
} };
- ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => {
- handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+ ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+ handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
}
}
self.list_channels_with_filter(|_| true)
}
- /// Gets the list of usable channels, in random order. Useful as an argument to [`find_route`]
- /// to ensure non-announced channels are used.
+ /// Gets the list of usable channels, in random order. Useful as an argument to
+ /// [`Router::find_route`] to ensure non-announced channels are used.
///
/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
/// are.
- ///
- /// [`find_route`]: crate::routing::router::find_route
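+	// Usage sketch (illustrative): the returned channels are typically collected into the
+	// `first_hops` argument when finding a route, as the tests below do. `payer_pubkey`,
+	// `route_params`, `network_graph`, `logger`, `scorer` and `random_seed_bytes` are assumed
+	// to be in scope:
+	//   let first_hops = channel_manager.list_usable_channels();
+	//   let route = find_route(&payer_pubkey, &route_params, &network_graph,
+	//       Some(&first_hops.iter().collect::<Vec<_>>()), &logger, &scorer, &random_seed_bytes);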
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
// Note we use is_live here instead of usable which leads to somewhat confused
// internal/external nomenclature, but that's ok cause that's probably what the user
if let Some(monitor_update) = monitor_update_opt.take() {
let update_id = monitor_update.update_id;
let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
- break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+ break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
}
if chan_entry.get().is_shutdown() {
Some(monitor_update) => {
let update_id = monitor_update.update_id;
let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
- if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) {
+ if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
break Err(e);
}
if update_res == ChannelMonitorUpdateStatus::InProgress {
/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
/// startup during which time claims that were in-progress at shutdown may be replayed.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
- self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails);
+ self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
}
/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
/// reason for the failure.
///
/// See [`FailureCode`] for valid failure codes.
- pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) {
+ pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
}
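+	// Usage sketch (illustrative; `payment_hash` refers to a known inbound payment):
+	//   channel_manager.fail_htlc_backwards_with_reason(&payment_hash,
+	//       FailureCode::IncorrectOrUnknownPaymentDetails);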
/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
- fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+ fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
match failure_code {
- FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16),
- FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16),
+ FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
+ FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
FailureCode::IncorrectOrUnknownPaymentDetails => {
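+				// Per BOLT 4, `incorrect_or_unknown_payment_details` failure data carries the
+				// HTLC amount (u64, big-endian) followed by the current block height (u32).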
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
- HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+ HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
}
}
}
None => None
};
- let mut peer_state_opt = counterparty_node_id_opt.as_ref().map(
+ let peer_state_opt = counterparty_node_id_opt.as_ref().map(
|counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
|peer_mutex| peer_mutex.lock().unwrap()
)
).unwrap_or(None);
- if let Some(mut peer_state_lock) = peer_state_opt.take() {
+		if let Some(mut peer_state_lock) = peer_state_opt {
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
let counterparty_node_id = chan.get().get_counterparty_node_id();
let update_id = monitor_update.update_id;
let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, chan);
+ peer_state, per_peer_state, chan);
if let Err(e) = res {
// TODO: This is a *critical* error - we probably updated the outbound edge
// of the HTLC's monitor with a preimage. We should retry this monitor
if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
return;
}
- handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
+ handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
}
/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
let chan = e.insert(chan);
- let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+ let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+ per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
// Note that we reply with the new channel_id in error messages if we gave up on the
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
let monitor = try_chan_entry!(self,
chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
- let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+ let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
// We weren't able to watch the channel to begin with, so no updates should be made on
// it. Previously, full_stack_target found an (unreachable) panic when the
if let Some(monitor_update) = monitor_update_opt {
let update_id = monitor_update.update_id;
let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
- break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+ break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
}
break Ok(());
},
let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
let update_id = monitor_update.update_id;
handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, chan)
+ peer_state, per_peer_state, chan)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let (htlcs_to_fail, res) = {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
- })?;
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ }).map(|mtx| mtx.lock().unwrap())?;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
let update_id = monitor_update.update_id;
- let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, chan);
+ let res = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan);
(htlcs_to_fail, res)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
let mut handle_errors = Vec::new();
- let per_peer_state = self.per_peer_state.read().unwrap();
- for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
- 'chan_loop: loop {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
- for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
- let counterparty_node_id = chan.get_counterparty_node_id();
- let funding_txo = chan.get_funding_txo();
- let (monitor_opt, holding_cell_failed_htlcs) =
- chan.maybe_free_holding_cell_htlcs(&self.logger);
- if !holding_cell_failed_htlcs.is_empty() {
- failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
- }
- if let Some(monitor_update) = monitor_opt {
- has_monitor_update = true;
-
- let update_res = self.chain_monitor.update_channel(
- funding_txo.expect("channel is live"), monitor_update);
- let update_id = monitor_update.update_id;
- let channel_id: [u8; 32] = *channel_id;
- let res = handle_new_monitor_update!(self, update_res, update_id,
- peer_state_lock, peer_state, chan, MANUALLY_REMOVING,
- peer_state.channel_by_id.remove(&channel_id));
- if res.is_err() {
- handle_errors.push((counterparty_node_id, res));
+ // Walk our list of channels and find any that need to update. Note that when we do find an
+ // update, if it includes actions that must be taken afterwards, we have to drop the
+ // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
+ // manage to go through all our peers without finding a single channel to update.
+ 'peer_loop: loop {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ 'chan_loop: loop {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+ for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+ let counterparty_node_id = chan.get_counterparty_node_id();
+ let funding_txo = chan.get_funding_txo();
+ let (monitor_opt, holding_cell_failed_htlcs) =
+ chan.maybe_free_holding_cell_htlcs(&self.logger);
+ if !holding_cell_failed_htlcs.is_empty() {
+ failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+ }
+ if let Some(monitor_update) = monitor_opt {
+ has_monitor_update = true;
+
+ let update_res = self.chain_monitor.update_channel(
+ funding_txo.expect("channel is live"), monitor_update);
+ let update_id = monitor_update.update_id;
+ let channel_id: [u8; 32] = *channel_id;
+ let res = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+ peer_state.channel_by_id.remove(&channel_id));
+ if res.is_err() {
+ handle_errors.push((counterparty_node_id, res));
+ }
+ continue 'peer_loop;
}
- continue 'chan_loop;
}
+ break 'chan_loop;
}
- break 'chan_loop;
}
+ break 'peer_loop;
}
let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
0 => {
let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
- let mut path = Some(Vec::new());
+ let mut path: Option<Vec<RouteHop>> = Some(Vec::new());
let mut payment_id = None;
let mut payment_secret = None;
- let mut payment_params = None;
+ let mut payment_params: Option<PaymentParameters> = None;
read_tlv_fields!(reader, {
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
(4, path, vec_type),
- (5, payment_params, option),
+ (5, payment_params, (option: ReadableArgs, 0)),
});
if payment_id.is_none() {
// For backwards compat, if there was no payment_id written, use the session_priv bytes
// instead.
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
}
+ if path.is_none() || path.as_ref().unwrap().is_empty() {
+ return Err(DecodeError::InvalidValue);
+ }
+ let path = path.unwrap();
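+				// Older serializations may not carry a `final_cltv_expiry_delta` inside
+				// `payment_params` (read back above with a default of 0); recover it from the
+				// final hop of the stored path.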
+ if let Some(params) = payment_params.as_mut() {
+ if params.final_cltv_expiry_delta == 0 {
+ params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta;
+ }
+ }
Ok(HTLCSource::OutboundRoute {
session_priv: session_priv.0.unwrap(),
first_hop_htlc_msat,
- path: path.unwrap(),
+ path,
payment_id: payment_id.unwrap(),
payment_secret,
payment_params,
let mut monitor_update_blocked_actions_per_peer = None;
let mut peer_states = Vec::new();
for (_, peer_state_mutex) in per_peer_state.iter() {
- peer_states.push(peer_state_mutex.lock().unwrap());
+ // Because we're holding the owning `per_peer_state` write lock here there's no chance
+ // of a lockorder violation deadlock - no other thread can be holding any
+ // per_peer_state lock at all.
+ peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
}
(serializable_peer_count).write(writer)?;
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
+ if !channel_closures.is_empty() {
+ pending_events_read.append(&mut channel_closures);
+ }
+
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
pending_outbound_payments = Some(outbounds);
- } else {
+ }
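+		// Build `pending_outbounds` now so the monitor-based replay logic below can insert into
+		// and claim against it directly.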
+ let pending_outbounds = OutboundPayments {
+ pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+ retry_lock: Mutex::new(())
+ };
+
+ {
// If we're tracking pending payments, ensure we haven't lost any by looking at the
// ChannelMonitor data for any channels for which we do not have authorative state
// (i.e. those for which we just force-closed above or we otherwise don't have a
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
- for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
+ for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
if path.is_empty() {
log_error!(args.logger, "Got an empty path for a pending payment");
return Err(DecodeError::InvalidValue);
}
+
let path_amt = path.last().unwrap().fee_msat;
let mut session_priv_bytes = [0; 32];
session_priv_bytes[..].copy_from_slice(&session_priv[..]);
- match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+ match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
hash_map::Entry::Occupied(mut entry) => {
let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
}
}
}
- for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
- if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
- let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
- info.prev_funding_outpoint == prev_hop_data.outpoint &&
- info.prev_htlc_id == prev_hop_data.htlc_id
- };
- // The ChannelMonitor is now responsible for this HTLC's
- // failure/success and will let us know what its outcome is. If we
- // still have an entry for this HTLC in `forward_htlcs` or
- // `pending_intercepted_htlcs`, we were apparently not persisted after
- // the monitor was when forwarding the payment.
- forward_htlcs.retain(|_, forwards| {
- forwards.retain(|forward| {
- if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- false
+ for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+ match htlc_source {
+ HTLCSource::PreviousHopData(prev_hop_data) => {
+ let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+ info.prev_funding_outpoint == prev_hop_data.outpoint &&
+ info.prev_htlc_id == prev_hop_data.htlc_id
+ };
+ // The ChannelMonitor is now responsible for this HTLC's
+ // failure/success and will let us know what its outcome is. If we
+ // still have an entry for this HTLC in `forward_htlcs` or
+ // `pending_intercepted_htlcs`, we were apparently not persisted after
+						// the monitor was persisted when forwarding the payment.
+ forward_htlcs.retain(|_, forwards| {
+ forwards.retain(|forward| {
+ if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ false
+ } else { true }
} else { true }
+ });
+ !forwards.is_empty()
+ });
+ pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ pending_events_read.retain(|event| {
+ if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+ intercepted_id != ev_id
+ } else { true }
+ });
+ false
} else { true }
});
- !forwards.is_empty()
- });
- pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- pending_events_read.retain(|event| {
- if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
- intercepted_id != ev_id
- } else { true }
- });
- false
- } else { true }
- });
+ },
+ HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
+ if let Some(preimage) = preimage_opt {
+ let pending_events = Mutex::new(pending_events_read);
+ // Note that we set `from_onchain` to "false" here,
+ // deliberately keeping the pending payment around forever.
+ // Given it should only occur when we have a channel we're
+						// force-closing for being stale, that's okay.
+ // The alternative would be to wipe the state when claiming,
+ // generating a `PaymentPathSuccessful` event but regenerating
+ // it and the `PaymentSent` on every restart until the
+ // `ChannelMonitor` is removed.
+ pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
+ pending_events_read = pending_events.into_inner().unwrap();
+ }
+ },
}
}
}
}
}
- let pending_outbounds = OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()) };
if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
// If we have pending HTLCs to forward, assume we either dropped a
// `PendingHTLCsForwardable` or the user received it but never processed it as they
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
- if !channel_closures.is_empty() {
- pending_events_read.append(&mut channel_closures);
- }
-
let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
Ok(key) => key,
Err(()) => return Err(DecodeError::InvalidValue)
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
final_value_msat: 100_000,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
-
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
}
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
+ }
+ {
// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
// as it has the funding transaction.
let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
+ }
+ {
// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.temporary_channel_id);
// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
}
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
open_channel_msg.temporary_channel_id);
// Of course, however, outbound channels are always allowed
// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
// rejected.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.temporary_channel_id);
// but we can still open an outbound channel.
// but even with such an outbound channel, additional inbound channels will still fail.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
- assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.temporary_channel_id);
}
}
_ => panic!("Unexpected event"),
}
- assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
open_channel_msg.temporary_channel_id);
// ...however if we accept the same channel 0conf it should work just fine.
_ => panic!("Unexpected event"),
}
- let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+ let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+ let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
$node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
_ => panic!("Failed to generate claim event"),
}
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+ let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));