}
/// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
// Note that this may be an outbound SCID alias for the associated channel.
short_channel_id: u64,
}
}
-/// A payment identifier used to uniquely identify a payment to LDK.
+/// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+/// a payment and ensure idempotency in LDK.
///
/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
}
}
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
pub(crate) enum SentHTLCId {
PreviousHopData { short_channel_id: u64, htlc_id: u64 },
/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {
}
impl RAAMonitorUpdateBlockingAction {
- #[allow(unused)]
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
channel_id: prev_hop.outpoint.to_channel_id(),
///
/// Holds all channels within corresponding `ChannelPhase`s where the peer is the counterparty.
pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
- /// `temporary_channel_id` -> `OutboundV1Channel`.
- ///
- /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
- /// been assigned a `channel_id`, the entry in this map is removed and one is created in
- /// `channel_by_id`.
- pub(super) outbound_v1_channel_by_id: HashMap<ChannelId, OutboundV1Channel<SP>>,
- /// `temporary_channel_id` -> `InboundV1Channel`.
- ///
- /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
- /// been assigned a `channel_id`, the entry in this map is removed and one is created in
- /// `channel_by_id`.
- pub(super) inbound_v1_channel_by_id: HashMap<ChannelId, InboundV1Channel<SP>>,
/// `temporary_channel_id` -> `InboundChannelRequest`.
///
/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
if require_disconnected && self.is_connected {
return false
}
- self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+ self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+ && self.monitor_update_blocked_actions.is_empty()
&& self.in_flight_monitor_updates.is_empty()
}
// Returns a count of all channels we have with this peer, including unfunded channels.
fn total_channel_count(&self) -> usize {
- self.channel_by_id.len() +
- self.outbound_v1_channel_by_id.len() +
- self.inbound_v1_channel_by_id.len() +
- self.inbound_channel_request_by_id.len()
+ self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
}
// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
fn has_channel(&self, channel_id: &ChannelId) -> bool {
- self.channel_by_id.contains_key(&channel_id) ||
- self.outbound_v1_channel_by_id.contains_key(&channel_id) ||
- self.inbound_v1_channel_by_id.contains_key(&channel_id) ||
- self.inbound_channel_request_by_id.contains_key(&channel_id)
+ self.channel_by_id.contains_key(channel_id) ||
+ self.inbound_channel_request_by_id.contains_key(channel_id)
}
}
pub enum RecentPaymentDetails {
/// When an invoice was requested and thus a payment has not yet been sent.
AwaitingInvoice {
- /// Identifier for the payment to ensure idempotency.
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
payment_id: PaymentId,
},
/// When a payment is still being sent and awaiting successful delivery.
Pending {
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
+ payment_id: PaymentId,
/// Hash of the payment that is currently being sent but has yet to be fulfilled or
/// abandoned.
payment_hash: PaymentHash,
/// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
/// payment is removed from tracking.
Fulfilled {
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
+ payment_id: PaymentId,
/// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
/// made before LDK version 0.0.104.
payment_hash: Option<PaymentHash>,
/// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
/// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
Abandoned {
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
+ payment_id: PaymentId,
/// Hash of the payment that we have given up trying to send.
payment_hash: PaymentHash,
},
}
}
-/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
-macro_rules! convert_unfunded_chan_err {
- ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
- match $err {
- ChannelError::Warn(msg) => {
- (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
- },
- ChannelError::Ignore(msg) => {
- (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
- },
- ChannelError::Close(msg) => {
- log_error!($self.logger, "Closing channel {} due to close-required error: {}", &$channel_id, msg);
- update_maps_on_chan_removal!($self, &$channel.context);
- let shutdown_res = $channel.context.force_shutdown(true);
-
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
- shutdown_res, None, $channel.context.get_value_satoshis()))
- },
- }
- };
-}
-
/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
macro_rules! convert_chan_phase_err {
($self: ident, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
}
}
-macro_rules! try_unfunded_chan_entry {
- ($self: ident, $res: expr, $entry: expr) => {
- match $res {
- Ok(res) => res,
- Err(e) => {
- let (drop, res) = convert_unfunded_chan_err!($self, e, $entry.get_mut(), $entry.key());
- if drop {
- $entry.remove_entry();
- }
- return Err(res);
- }
- }
- }
-}
-
-macro_rules! remove_channel {
- ($self: expr, $entry: expr) => {
- {
- let channel = $entry.remove_entry().1;
- update_maps_on_chan_removal!($self, &channel.context);
- channel
- }
- }
-}
-
macro_rules! remove_channel_phase {
($self: expr, $entry: expr) => {
{
let res = channel.get_open_channel(self.genesis_hash.clone());
let temporary_channel_id = channel.context.channel_id();
- match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) {
+ match peer_state.channel_by_id.entry(temporary_channel_id) {
hash_map::Entry::Occupied(_) => {
if cfg!(fuzzing) {
return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
panic!("RNG is bad???");
}
},
- hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+ hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
peer_state.latest_features.clone(), &self.fee_estimator);
res.push(details);
}
- for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
- let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone(), &self.fee_estimator);
- res.push(details);
- }
- for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
- let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone(), &self.fee_estimator);
- res.push(details);
- }
}
}
res
return peer_state.channel_by_id
.iter()
.map(|(_, phase)| phase.context())
- .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
- .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
.map(context_to_details)
.collect();
}
},
PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
Some(RecentPaymentDetails::Pending {
+ payment_id: *payment_id,
payment_hash: *payment_hash,
total_msat: *total_msat,
})
},
PendingOutboundPayment::Abandoned { payment_hash, .. } => {
- Some(RecentPaymentDetails::Abandoned { payment_hash: *payment_hash })
+ Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
},
PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
- Some(RecentPaymentDetails::Fulfilled { payment_hash: *payment_hash })
+ Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
},
PendingOutboundPayment::Legacy { .. } => None
})
(None, chan_phase.context().get_counterparty_node_id())
},
}
- } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
- log_error!(self.logger, "Force-closing channel {}", &channel_id);
- self.issue_channel_close_events(&chan.get().context, closure_reason);
- let mut chan = remove_channel!(self, chan);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Unfunded channel has no update
- (None, chan.context.get_counterparty_node_id())
- } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
- log_error!(self.logger, "Force-closing channel {}", &channel_id);
- self.issue_channel_close_events(&chan.get().context, closure_reason);
- let mut chan = remove_channel!(self, chan);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Unfunded channel has no update
- (None, chan.context.get_counterparty_node_id())
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
log_error!(self.logger, "Force-closing channel {}", &channel_id);
// N.B. that we don't send any channel close event here: we
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(&temporary_channel_id) {
- Some(chan) => {
+ let (chan, msg) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+ Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
},
}
},
- None => {
- return Err(APIError::ChannelUnavailable {
+ Some(phase) => {
+ peer_state.channel_by_id.insert(*temporary_channel_id, phase);
+ return Err(APIError::APIMisuseError {
err: format!(
- "Channel with id {} not found for the passed counterparty node_id {}",
+ "Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
temporary_channel_id, counterparty_node_id),
})
},
+ None => return Err(APIError::ChannelUnavailable {err: format!(
+ "Channel with id {} not found for the passed counterparty node_id {}",
+ temporary_channel_id, counterparty_node_id),
+ }),
};
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
}
}
continue;
- }
-
- let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
- &mut channel.context
- } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
- &mut channel.context
} else {
// This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
debug_assert!(false);
channel_id, counterparty_node_id),
});
};
- let mut config = context.config();
- config.apply(config_update);
- // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`
- // which would be the case for pending inbound/outbound channels.
- context.update_config(&config);
}
Ok(())
}
}
});
- peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
- chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events,
- counterparty_node_id));
- peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
- chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events,
- counterparty_node_id));
-
for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
}
- fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
+ fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
+ forwarded_htlc_value_msat: Option<u64>, from_onchain: bool,
+ next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+ ) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
"We don't support claim_htlc claims during startup - monitors may not be available yet");
+ if let Some(pubkey) = next_channel_counterparty_node_id {
+ debug_assert_eq!(pubkey, path.hops[0].pubkey);
+ }
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: next_channel_outpoint,
counterparty_node_id: path.hops[0].pubkey,
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
+ let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat| {
if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
next_channel_id: Some(next_channel_outpoint.to_channel_id()),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
- downstream_counterparty_and_funding_outpoint: None,
+ downstream_counterparty_and_funding_outpoint:
+ if let Some(node_id) = next_channel_counterparty_node_id {
+ Some((node_id, next_channel_outpoint, completed_blocker))
+ } else {
+ // We can only get `None` here if we are processing a
+ // `ChannelMonitor`-originated event, in which case we
+ // don't care about ensuring we wake the downstream
+ // channel's monitor updating - the channel is already
+ // closed.
+ None
+ },
})
} else { None }
});
msg: channel.accept_inbound_channel(),
});
- peer_state.inbound_v1_channel_by_id.insert(temporary_channel_id.clone(), channel);
+ peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
Ok(())
}
peer: &PeerState<SP>, best_block_height: u32
) -> usize {
let mut num_unfunded_channels = 0;
- for chan in peer.channel_by_id.iter().filter_map(
- |(_, phase)| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
- ) {
- // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
- // which have not yet had any confirmations on-chain.
- if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
- chan.context.get_funding_tx_confirmations(best_block_height) == 0
- {
- num_unfunded_channels += 1;
- }
- }
- for (_, chan) in peer.inbound_v1_channel_by_id.iter() {
- if chan.context.minimum_depth().unwrap_or(1) != 0 {
- num_unfunded_channels += 1;
+ for (_, phase) in peer.channel_by_id.iter() {
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
+ // which have not yet had any confirmations on-chain.
+ if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+ chan.context.get_funding_tx_confirmations(best_block_height) == 0
+ {
+ num_unfunded_channels += 1;
+ }
+ },
+ ChannelPhase::UnfundedInboundV1(chan) => {
+ if chan.context.minimum_depth().unwrap_or(1) != 0 {
+ num_unfunded_channels += 1;
+ }
+ },
+ ChannelPhase::UnfundedOutboundV1(_) => {
+ // Outbound channels don't contribute to the unfunded count in the DoS context.
+ continue;
+ }
}
}
num_unfunded_channels + peer.inbound_channel_request_by_id.len()
node_id: counterparty_node_id.clone(),
msg: channel.accept_inbound_channel(),
});
- peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
+ peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));
Ok(())
}
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- try_unfunded_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
- (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
+ match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ hash_map::Entry::Occupied(mut phase) => {
+ match phase.get_mut() {
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
+ (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
+ },
+ _ => {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ }
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let (chan, funding_msg, monitor) =
- match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) {
- Some(inbound_chan) => {
+ match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
+ Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
Ok(res) => res,
Err((mut inbound_chan, err)) => {
},
}
},
+ Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ },
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
};
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
- // https://github.com/lightningdevkit/rust-lightning/issues/2422
- if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
- log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
- self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
- let mut chan = remove_channel!(self, chan_entry);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- return Ok(());
- } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
- log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
- self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
- let mut chan = remove_channel!(self, chan_entry);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- return Ok(());
- } else if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- if !chan.received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
- msg.channel_id,
- if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
- }
-
- let funding_txo_opt = chan.context.get_funding_txo();
- let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self,
- chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
- dropped_htlcs = htlcs;
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+ let phase = chan_phase_entry.get_mut();
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ if !chan.received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ msg.channel_id,
+ if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
- if let Some(msg) = shutdown {
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
+ let funding_txo_opt = chan.context.get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self,
+ chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
+ dropped_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
- }
- break Ok(());
+ if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+						// `closing_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+ }
+ break Ok(());
+ },
+ ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+ let context = phase.context_mut();
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+ self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel_phase!(self, chan_phase_entry);
+ self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
+ return Ok(());
+ },
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
+ if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
+ peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
+ .or_insert_with(Vec::new)
+ .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
+ }
+					// Note that we do not need to push a `monitor_update_blocked_actions`
+ // entry here, even though we *do* need to block the next RAA monitor update.
+ // We do this instead in the `claim_funds_internal` by attaching a
+ // `ReleaseRAAChannelMonitorUpdate` action to the event generated when the
+ // outbound HTLC is claimed. This is guaranteed to all complete before we
+ // process the RAA as messages are processed from single peers serially.
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
res
} else {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
Ok(())
}
})
}
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) fn test_raa_monitor_updates_held(&self,
+ counterparty_node_id: PublicKey, channel_id: ChannelId
+ ) -> bool {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+ let peer_state = &mut *peer_state_lck;
+
+ if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
+ return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+ chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+ }
+ }
+ false
+ }
+
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let (htlcs_to_fail, res) = {
let per_peer_state = self.per_peer_state.read().unwrap();
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", &preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
+ log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
+ self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
}
&chan.context
},
+ // Unfunded channels will always be removed.
ChannelPhase::UnfundedOutboundV1(chan) => {
&chan.context
},
&chan.context
},
};
-
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
false
});
- peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
- update_maps_on_chan_removal!(self, &chan.context);
- self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
- false
- });
- peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
- update_maps_on_chan_removal!(self, &chan.context);
- self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
- false
- });
// Note that we don't bother generating any events for pre-accept channels -
// they're not considered "channels" yet from the PoV of our events interface.
peer_state.inbound_channel_request_by_id.clear();
}
e.insert(Mutex::new(PeerState {
channel_by_id: HashMap::new(),
- outbound_v1_channel_by_id: HashMap::new(),
- inbound_v1_channel_by_id: HashMap::new(),
inbound_channel_request_by_id: HashMap::new(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
// Note that we don't bother generating any events for pre-accept channels -
// they're not considered "channels" yet from the PoV of our events interface.
peer_state.inbound_channel_request_by_id.clear();
- peer_state.channel_by_id.keys().cloned()
- .chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
- .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
+ peer_state.channel_by_id.keys().cloned().collect()
};
for channel_id in channel_ids {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
+ if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: *counterparty_node_id,
let peer_state_from_chans = |channel_by_id| {
PeerState {
channel_by_id,
- outbound_v1_channel_by_id: HashMap::new(),
- inbound_v1_channel_by_id: HashMap::new(),
inbound_channel_request_by_id: HashMap::new(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
// downstream chan is closed (because we don't have a
// channel_id -> peer map entry).
counterparty_opt.is_none(),
+ counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
monitor.get_funding_txo().0))
} else { None }
} else {
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay {
+ for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, downstream_funding);
+ downstream_closed, downstream_node_id, downstream_funding);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
// To start (1), send a regular payment but don't claim it.
let expected_route = [&nodes[1]];
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
// Next, attempt a keysend payment and make sure it fails.
let route_params = RouteParameters::from_payment_params_and_value(