// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
-use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
+use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::logger::{Level, Logger, WithContext};
use crate::util::errors::APIError;
+#[cfg(not(c_bindings))]
+use {
+ crate::routing::router::DefaultRouter,
+ crate::routing::gossip::NetworkGraph,
+ crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
+ crate::sign::KeysManager,
+};
use alloc::collections::{btree_map, BTreeMap};
/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
/// An HTLC which should be forwarded on to another node.
Forward {
/// [`Event::PaymentClaimable::onion_fields`] as
/// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
+ /// Set if this HTLC is the final hop in a multi-hop blinded path.
+ requires_blinded_error: bool,
},
/// The onion indicates that this is for payment to us but which contains the preimage for
/// claiming included, and is unrelated to any invoice we'd previously generated (aka a
}
/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
-#[derive(Clone, Copy, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
/// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
/// onion payload if we're the introduction node. Useful for calculating the next hop's
/// [`msgs::UpdateAddHTLC::blinding_point`].
pub inbound_blinding_point: PublicKey,
- // Another field will be added here when we support forwarding as a non-intro node.
+ /// If needed, this determines how this HTLC should be failed backwards, based on whether we are
+ /// the introduction node.
+ pub failure: BlindedFailure,
}
impl PendingHTLCRouting {
// Used to override the onion failure code and data if the HTLC is blinded.
fn blinded_failure(&self) -> Option<BlindedFailure> {
- // TODO: needs update when we support receiving to multi-hop blinded paths
- if let Self::Forward { blinded: Some(_), .. } = self {
- Some(BlindedFailure::FromIntroductionNode)
- } else {
- None
+ match self {
+ Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
+ Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+ _ => None,
}
}
}
/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
/// should go next.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
/// Further routing details based on whether the HTLC is being forwarded or received.
pub routing: PendingHTLCRouting,
Fail(HTLCFailureMsg),
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
pub(super) forward_info: PendingHTLCInfo,
// Note that this may be an outbound SCID alias for the associated channel.
prev_short_channel_id: u64,
prev_htlc_id: u64,
+ prev_channel_id: ChannelId,
prev_funding_outpoint: OutPoint,
prev_user_channel_id: u128,
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
AddHTLC(PendingAddHTLCInfo),
FailHTLC {
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
+ FailMalformedHTLC {
+ htlc_id: u64,
+ failure_code: u16,
+ sha256_of_onion: [u8; 32],
+ },
}
-// Used for failing blinded HTLCs backwards correctly.
-#[derive(Clone, Debug, Hash, PartialEq, Eq)]
-enum BlindedFailure {
+/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
+/// which determines the failure message that should be used.
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+pub enum BlindedFailure {
+ /// This HTLC is being failed backwards by the introduction node, and thus should be failed with
+ /// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`.
FromIntroductionNode,
- // Another variant will be added here for non-intro nodes.
+ /// This HTLC is being failed backwards by a blinded node within the path, and thus should be
+ /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`.
+ FromBlindedNode,
}
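+ // `fail_htlc_backwards_internal` turns `FromIntroductionNode` into an
+ // `HTLCForwardInfo::FailHTLC` and `FromBlindedNode` into an
+ // `HTLCForwardInfo::FailMalformedHTLC` when queueing the failure backwards.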
/// Tracks the inbound corresponding to an outbound HTLC
incoming_packet_shared_secret: [u8; 32],
phantom_shared_secret: Option<[u8; 32]>,
blinded_failure: Option<BlindedFailure>,
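+ // The `ChannelId` of the (previous-hop) channel we received this HTLC over.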
+ channel_id: ChannelId,
// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
// channel with a preimage provided by the forward channel.
impl From<&ClaimableHTLC> for events::ClaimedHTLC {
fn from(val: &ClaimableHTLC) -> Self {
events::ClaimedHTLC {
- channel_id: val.prev_hop.outpoint.to_channel_id(),
+ channel_id: val.prev_hop.channel_id,
user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
cltv_expiry: val.cltv_expiry,
value_msat: val.value,
struct MsgHandleErrInternal {
err: msgs::LightningError,
- chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
+ closes_channel: bool,
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
- channel_capacity: Option<u64>,
}
impl MsgHandleErrInternal {
#[inline]
},
},
},
- chan_id: None,
+ closes_channel: false,
shutdown_finish: None,
- channel_capacity: None,
}
}
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
- Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
+ Self { err, closes_channel: false, shutdown_finish: None }
}
#[inline]
- fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+ fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
let action = if shutdown_res.monitor_update.is_some() {
// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
};
Self {
err: LightningError { err, action },
- chan_id: Some((channel_id, user_channel_id)),
+ closes_channel: true,
shutdown_finish: Some((shutdown_res, channel_update)),
- channel_capacity: Some(channel_capacity)
}
}
#[inline]
},
},
},
- chan_id: None,
+ closes_channel: false,
shutdown_finish: None,
- channel_capacity: None,
}
}
fn closes_channel(&self) -> bool {
- self.chan_id.is_some()
+ self.closes_channel
}
}
///
/// Note that any such events are lost on shutdown, so in general they must be updates which
/// are regenerated on startup.
- ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+ ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
/// channel to continue normal operation.
///
MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
+ channel_id: ChannelId,
update: ChannelMonitorUpdate
},
/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
/// outbound edge.
EmitEventAndFreeOtherChannel {
event: events::Event,
- downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+ downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
},
/// Indicates we should immediately resume the operation of another channel, unless there is
/// some other reason why the channel is blocked. In practice this simply means immediately
downstream_counterparty_node_id: PublicKey,
downstream_funding_outpoint: OutPoint,
blocking_action: RAAMonitorUpdateBlockingAction,
+ downstream_channel_id: ChannelId,
},
}
(0, downstream_counterparty_node_id, required),
(2, downstream_funding_outpoint, required),
(4, blocking_action, required),
+ // Note that by the time we get past the required read above, downstream_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
},
(2, EmitEventAndFreeOtherChannel) => {
(0, event, upgradable_required),
ReleaseRAAChannelMonitorUpdate {
counterparty_node_id: PublicKey,
channel_funding_outpoint: OutPoint,
+ channel_id: ChannelId,
},
}
impl_writeable_tlv_based_enum!(EventCompletionAction,
(0, ReleaseRAAChannelMonitorUpdate) => {
(0, channel_funding_outpoint, required),
(2, counterparty_node_id, required),
+ // Note that by the time we get past the required read above, channel_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
};
);
impl RAAMonitorUpdateBlockingAction {
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
- channel_id: prev_hop.outpoint.to_channel_id(),
+ channel_id: prev_hop.channel_id,
htlc_id: prev_hop.htlc_id,
}
}
// |
// |__`peer_state`
// |
-// |__`id_to_peer`
+// |__`outpoint_to_peer`
// |
// |__`short_to_chan_info`
// |
/// See `ChannelManager` struct-level documentation for lock order requirements.
outbound_scid_aliases: Mutex<HashSet<u64>>,
- /// `channel_id` -> `counterparty_node_id`.
- ///
- /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
- /// multiple channels with the same `temporary_channel_id` to different peers can exist,
- /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+ /// Channel funding outpoint -> `counterparty_node_id`.
///
/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
/// the corresponding channel for the event, as we only have access to the `channel_id` during
/// required to access the channel with the `counterparty_node_id`.
///
/// See `ChannelManager` struct-level documentation for lock order requirements.
- id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
+ #[cfg(not(test))]
+ outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
+ #[cfg(test)]
+ pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
///
/// The Channel's funding transaction output, if we've negotiated the funding transaction with
/// our counterparty already.
///
- /// Note that, if this has been set, `channel_id` will be equivalent to
- /// `funding_txo.unwrap().to_channel_id()`.
+ /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to
+ /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`.
pub funding_txo: Option<OutPoint>,
/// The features which this channel operates with. See individual features for more info.
///
match $internal {
Ok(msg) => Ok(msg),
- Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
+ Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
let mut msg_events = Vec::with_capacity(2);
if let Some((shutdown_res, update_option)) = shutdown_finish {
+ let counterparty_node_id = shutdown_res.counterparty_node_id;
+ let channel_id = shutdown_res.channel_id;
+ let logger = WithContext::from(
+ &$self.logger, Some(counterparty_node_id), Some(channel_id),
+ );
+ log_error!(logger, "Force-closing channel: {}", err.err);
+
$self.finish_close_channel(shutdown_res);
if let Some(update) = update_option {
msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- if let Some((channel_id, user_channel_id)) = chan_id {
- $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
- channel_id, user_channel_id,
- reason: ClosureReason::ProcessingError { err: err.err.clone() },
- counterparty_node_id: Some($counterparty_node_id),
- channel_capacity_sats: channel_capacity,
- }, None));
- }
+ } else {
+ log_error!($self.logger, "Got non-closing error: {}", err.err);
}
- let logger = WithContext::from(
- &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
- );
- log_error!(logger, "{}", err.err);
if let msgs::ErrorAction::IgnoreError = err.action {
} else {
msg_events.push(events::MessageSendEvent::HandleError {
macro_rules! update_maps_on_chan_removal {
($self: expr, $channel_context: expr) => {{
- $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
+ if let Some(outpoint) = $channel_context.get_funding_txo() {
+ $self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
+ }
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
if let Some(short_id) = $channel_context.get_short_channel_id() {
short_to_chan_info.remove(&short_id);
let logger = WithChannelContext::from(&$self.logger, &$channel.context);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
- let shutdown_res = $channel.context.force_shutdown(true);
- let user_id = $channel.context.get_user_id();
- let channel_capacity_satoshis = $channel.context.get_value_satoshis();
-
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
- shutdown_res, $channel_update, channel_capacity_satoshis))
+ let reason = ClosureReason::ProcessingError { err: msg.clone() };
+ let shutdown_res = $channel.context.force_shutdown(true, reason);
+ let err =
+ MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
+ (true, err)
},
}
};
handle_new_monitor_update!($self, $update_res, $chan, _internal,
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
};
- ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+ ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
.or_insert_with(Vec::new);
// During startup, we push monitor updates as background events through to here in
forward_htlcs: Mutex::new(HashMap::new()),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
pending_intercepted_htlcs: Mutex::new(HashMap::new()),
- id_to_peer: Mutex::new(HashMap::new()),
+ outpoint_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),
our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
- // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+ // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but this is
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
pub fn list_channels(&self) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
- // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+ // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but this is
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
.collect()
}
- /// Helper function that issues the channel close events
- fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
- let mut pending_events_lock = self.pending_events.lock().unwrap();
- match context.unbroadcasted_funding() {
- Some(transaction) => {
- pending_events_lock.push_back((events::Event::DiscardFunding {
- channel_id: context.channel_id(), transaction
- }, None));
- },
- None => {},
- }
- pending_events_lock.push_back((events::Event::ChannelClosed {
- channel_id: context.channel_id(),
- user_channel_id: context.get_user_id(),
- reason: closure_reason,
- counterparty_node_id: Some(context.get_counterparty_node_id()),
- channel_capacity_sats: Some(context.get_value_satoshis()),
- }, None));
- }
-
fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() {
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
} else {
- self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
- shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
}
},
hash_map::Entry::Vacant(_) => {
let logger = WithContext::from(
&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
);
- log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+
+ log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
+ shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
+ if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
let mut peer_state = peer_state_mutex.lock().unwrap();
if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
update_maps_on_chan_removal!(self, &chan.context());
- self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
- shutdown_results.push(chan.context_mut().force_shutdown(false));
+ shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
}
}
has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
"Closing a batch where all channels have completed initial monitor update",
);
}
+
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push_back((events::Event::ChannelClosed {
+ channel_id: shutdown_res.channel_id,
+ user_channel_id: shutdown_res.user_channel_id,
+ reason: shutdown_res.closure_reason,
+ counterparty_node_id: Some(shutdown_res.counterparty_node_id),
+ channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+ channel_funding_txo: shutdown_res.channel_funding_txo,
+ }, None));
+
+ if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
+ pending_events.push_back((events::Event::DiscardFunding {
+ channel_id: shutdown_res.channel_id, transaction
+ }, None));
+ }
+ }
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger, "Force-closing channel {}", channel_id);
- self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
mem::drop(peer_state);
mem::drop(per_peer_state);
match chan_phase {
ChannelPhase::Funded(mut chan) => {
- self.finish_close_channel(chan.context.force_shutdown(broadcast));
+ self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
(self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
},
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
- self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
msg, &self.node_signer, &self.logger, &self.secp_ctx
)?;
- let is_blinded = match next_hop {
+ let is_intro_node_forward = match next_hop {
onion_utils::Hop::Forward {
- next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+ next_hop_data: msgs::InboundOnionPayload::BlindedForward {
+ intro_node_blinding_point: Some(_), ..
+ }, ..
} => true,
- _ => false, // TODO: update this when we support receiving to multi-hop blinded paths
+ _ => false,
};
macro_rules! return_err {
WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
"Failed to accept/forward incoming HTLC: {}", $msg
);
- let (err_code, err_data) = if is_blinded {
+ // If `msg.blinding_point` is set, we must always fail with malformed.
+ if msg.blinding_point.is_some() {
+ return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }));
+ }
+
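+ // When we're the introduction node forwarding into a blinded path, report any failure
+ // as `invalid_onion_blinding` rather than the real error so we don't reveal details
+ // about the blinded portion of the path.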
+ let (err_code, err_data) = if is_intro_node_forward {
(INVALID_ONION_BLINDING, &[0; 32][..])
} else { ($err_code, $data) };
return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
{
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ if msg.blinding_point.is_some() {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ }
return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
// delay) once they've sent us a commitment_signed!
PendingHTLCStatus::Forward(info)
},
- Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
},
onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
Ok(info) => PendingHTLCStatus::Forward(info),
- Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
}
}
}, onion_packet, None, &self.fee_estimator, &&logger);
match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
Some(monitor_update) => {
- match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+ match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
false => {
// Note that MonitorUpdateInProgress here indicates (per function
// docs) that we will resend the commitment update once monitor
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+ let funding_txo;
+ let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
- let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+ funding_txo = find_funding_output(&chan, &funding_transaction)?;
let logger = WithChannelContext::from(&self.logger, &chan.context);
let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
- let user_id = chan.context.get_user_id();
- let shutdown_res = chan.context.force_shutdown(false);
- let channel_capacity = chan.context.get_value_satoshis();
- (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
+ let reason = ClosureReason::ProcessingError { err: msg.clone() };
+ let shutdown_res = chan.context.force_shutdown(false, reason);
+ (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
} else { unreachable!(); });
match funding_res {
Ok(funding_msg) => (chan, funding_msg),
panic!("Generated duplicate funding txid?");
},
hash_map::Entry::Vacant(e) => {
- let mut id_to_peer = self.id_to_peer.lock().unwrap();
- if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
- panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+ let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+ match outpoint_to_peer.entry(funding_txo) {
+ hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
+ hash_map::Entry::Occupied(o) => {
+ let err = format!(
+ "An existing channel using outpoint {} is open with peer {}",
+ funding_txo, o.get()
+ );
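+ // Drop the locks before force-closing: `finish_close_channel` must not be
+ // called with the `per_peer_state` or peer-state locks held.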
+ mem::drop(outpoint_to_peer);
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ let reason = ClosureReason::ProcessingError { err: err.clone() };
+ self.finish_close_channel(chan.context.force_shutdown(true, reason));
+ return Err(APIError::ChannelUnavailable { err });
+ }
}
e.insert(ChannelPhase::UnfundedOutboundV1(chan));
}
}
let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
if let Some(funding_batch_state) = funding_batch_state.as_mut() {
- funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+ // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
+ // need to fix this somehow to not rely on using the outpoint for the channel ID if we
+ // want to support V2 batching here as well.
+ funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
}
Ok(outpoint)
})
.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
.map(|mut chan| {
update_maps_on_chan_removal!(self, &chan.context());
- self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
- shutdown_results.push(chan.context_mut().force_shutdown(false));
+ let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
+ shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
});
}
}
+ mem::drop(funding_batch_states);
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
let mut per_source_pending_forward = [(
payment.prev_short_channel_id,
payment.prev_funding_outpoint,
+ payment.prev_channel_id,
payment.prev_user_channel_id,
vec![(pending_htlc_info, payment.prev_htlc_id)]
)];
short_channel_id: payment.prev_short_channel_id,
user_channel_id: Some(payment.prev_user_channel_id),
outpoint: payment.prev_funding_outpoint,
+ channel_id: payment.prev_channel_id,
htlc_id: payment.prev_htlc_id,
incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
- let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+ let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
let mut forward_htlcs = HashMap::new();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
outgoing_cltv_value, ..
}
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
let next_hop = match onion_utils::decode_next_payment_hop(
phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
- payment_hash, &self.node_signer
+ payment_hash, None, &self.node_signer
) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
outgoing_cltv_value, Some(phantom_shared_secret), false, None,
current_height, self.default_configuration.accept_mpp_keysend)
{
- Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
- Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+ Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+ Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
_ => panic!(),
fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
}
},
- HTLCForwardInfo::FailHTLC { .. } => {
+ HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
// Channel went away before we could fail it. This implies
// the channel is now on chain and our counterparty is
// trying to broadcast the HTLC-Timeout, but that's their
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let logger = WithChannelContext::from(&self.logger, &chan.context);
for forward_info in pending_forwards.drain(..) {
- match forward_info {
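+ // Collect the result of queueing the failure (if any) so that both
+ // `FailHTLC` and `FailMalformedHTLC` share the error handling below.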
+ let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
routing: PendingHTLCRouting::Forward {
onion_packet, blinded, ..
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
// Phantom payments are only PendingHTLCRouting::Receive.
phantom_shared_secret: None,
- blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+ blinded_failure: blinded.map(|b| b.failure),
});
let next_blinding_point = blinded.and_then(|b| {
let encrypted_tlvs_ss = self.node_signer.ecdh(
));
continue;
}
+ None
},
HTLCForwardInfo::AddHTLC { .. } => {
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- if let Err(e) = chan.queue_fail_htlc(
- htlc_id, err_packet, &&logger
- ) {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_htlc() were not met");
- }
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- }
+ Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
+ },
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ let res = chan.queue_fail_malformed_htlc(
+ htlc_id, failure_code, sha256_of_onion, &&logger
+ );
+ Some((res, htlc_id))
},
+ };
+ if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+ if let Err(e) = queue_fail_htlc_res {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK; the channel is on the chain
+ // and sending the HTLC-Timeout is their problem.
+ continue;
+ }
}
}
} else {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
skimmed_fee_msat, ..
}
}) => {
let blinded_failure = routing.blinded_failure();
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
- PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => {
+ PendingHTLCRouting::Receive {
+ payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
+ custom_tlvs, requires_blinded_error: _
+ } => {
let _legacy_hop_data = Some(payment_data.clone());
let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
payment_metadata, custom_tlvs };
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: $htlc.prev_hop.short_channel_id,
user_channel_id: $htlc.prev_hop.user_channel_id,
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
phantom_shared_secret,
- blinded_failure: None,
+ blinded_failure,
}), payment_hash,
HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
HTLCDestination::FailedPayment { payment_hash: $payment_hash },
#[allow(unused_assignments)] {
committed_to_claimable = true;
}
- let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
},
};
},
- HTLCForwardInfo::FailHTLC { .. } => {
+ HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
}
}
for event in background_events.drain(..) {
match event {
- BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
// The channel has already been closed, so no use bothering to care about the
// monitor update completing.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
- BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
let mut updated_chan = false;
{
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+ match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_phase) => {
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
updated_chan = true;
- handle_new_monitor_update!(self, funding_txo, update.clone(),
+ handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(),
peer_state_lock, peer_state, per_peer_state, chan);
} else {
debug_assert!(false, "We shouldn't have an update for a non-funded channel");
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
- self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
- shutdown_channels.push(context.force_shutdown(false));
+ shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
},
HTLCSource::PreviousHopData(HTLCPreviousHopData {
ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
- ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+ ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
}) => {
log_trace!(
- WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+ WithContext::from(&self.logger, None, Some(*channel_id)),
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
- let err_packet = match blinded_failure {
+ let failure = match blinded_failure {
Some(BlindedFailure::FromIntroductionNode) => {
let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
- blinded_onion_error.get_encrypted_failure_packet(
+ let err_packet = blinded_onion_error.get_encrypted_failure_packet(
incoming_packet_shared_secret, phantom_shared_secret
- )
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
+ },
+ Some(BlindedFailure::FromBlindedNode) => {
+ HTLCForwardInfo::FailMalformedHTLC {
+ htlc_id: *htlc_id,
+ failure_code: INVALID_ONION_BLINDING,
+ sha256_of_onion: [0; 32]
+ }
},
None => {
- onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret)
+ let err_packet = onion_error.get_encrypted_failure_packet(
+ incoming_packet_shared_secret, phantom_shared_secret
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
}
};
}
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
+ entry.get_mut().push(failure);
},
hash_map::Entry::Vacant(entry) => {
- entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }));
+ entry.insert(vec!(failure));
}
}
mem::drop(forward_htlcs);
if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::HTLCHandlingFailed {
- prev_channel_id: outpoint.to_channel_id(),
+ prev_channel_id: *channel_id,
failed_next_destination: destination,
}, None));
},
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
+ let prev_hop_chan_id = htlc.prev_hop.channel_id;
if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
{
let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
+ let chan_id = prev_hop.channel_id;
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
None => None
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
if !during_init {
- handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+ handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
} else {
// If we're running during init we cannot update a monitor directly -
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: prev_hop.outpoint,
+ channel_id: prev_hop.channel_id,
update: monitor_update.clone(),
});
}
log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
chan_id, action);
- let (node_id, funding_outpoint, blocker) =
+ let (node_id, _funding_outpoint, channel_id, blocker) =
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: node_id,
downstream_funding_outpoint: funding_outpoint,
- blocking_action: blocker,
+ blocking_action: blocker, downstream_channel_id: channel_id,
} = action {
- (node_id, funding_outpoint, blocker)
+ (node_id, funding_outpoint, channel_id, blocker)
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
let mut peer_state = peer_state_mtx.lock().unwrap();
if let Some(blockers) = peer_state
.actions_blocking_raa_monitor_updates
- .get_mut(&funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
let mut found_blocker = false;
blockers.retain(|iter| {
}
let preimage_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
+ counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage,
}],
+ channel_id: Some(prev_hop.channel_id),
};
if !during_init {
// with a preimage we *must* somehow manage to propagate it to the upstream
// channel, or we must have an ability to receive the same event and try
// again on restart.
- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+ "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
} else {
// complete the monitor update completion action from `completion_action`.
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
- prev_hop.outpoint, preimage_update,
+ prev_hop.outpoint, prev_hop.channel_id, preimage_update,
)));
}
// Note that we do process the completion action here. This totally could be a
}
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
- forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
- next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+ forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+ startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+ next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert_eq!(pubkey, path.hops[0].pubkey);
}
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint: next_channel_outpoint,
+ channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
counterparty_node_id: path.hops[0].pubkey,
};
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
&self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
- let prev_outpoint = hop_data.outpoint;
+ let prev_channel_id = hop_data.channel_id;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
+ #[cfg(debug_assertions)]
+ let claiming_channel_id = hop_data.channel_id;
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
- Some((node_id, next_channel_outpoint, completed_blocker))
+ Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
} else {
// We can only get `None` here if we are processing a
// `ChannelMonitor`-originated event, in which case we
},
// or the channel we'd unblock is already closed,
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
- (funding_txo, monitor_update)
+ (funding_txo, _channel_id, monitor_update)
) => {
if *funding_txo == next_channel_outpoint {
assert_eq!(monitor_update.updates.len(), 1);
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
- *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+ *channel_id == claiming_channel_id,
}
}), "{:?}", *background_events);
}
Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: other_chan.0,
downstream_funding_outpoint: other_chan.1,
- blocking_action: other_chan.2,
+ downstream_channel_id: other_chan.2,
+ blocking_action: other_chan.3,
})
} else { None }
} else {
- let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
if let Some(claimed_htlc_value) = htlc_claim_value_msat {
Some(claimed_htlc_value - forwarded_htlc_value)
} else { None }
} else { None };
+ debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
+ "skimmed_fee_msat must always be included in total_fee_earned_msat");
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
- fee_earned_msat,
+ total_fee_earned_msat,
claim_from_onchain_tx: from_onchain,
- prev_channel_id: Some(prev_outpoint.to_channel_id()),
- next_channel_id: Some(next_channel_outpoint.to_channel_id()),
+ prev_channel_id: Some(prev_channel_id),
+ next_channel_id: Some(next_channel_id),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ skimmed_fee_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
})
event, downstream_counterparty_and_funding_outpoint
} => {
self.pending_events.lock().unwrap().push_back((event, None));
- if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
- self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+ if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
+ self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
}
},
MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
- downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+ downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
} => {
self.handle_monitor_update_release(
downstream_counterparty_node_id,
downstream_funding_outpoint,
+ downstream_channel_id,
Some(blocking_action),
);
},
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
- -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
let logger = WithChannelContext::from(&self.logger, &channel.context);
log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
let counterparty_node_id = channel.context.get_counterparty_node_id();
if !pending_forwards.is_empty() {
htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
- channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
+ channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
}
if let Some(msg) = channel_ready {
htlc_forwards
}
- fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
let counterparty_node_id = match counterparty_node_id {
Some(cp_id) => cp_id.clone(),
None => {
// TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the id_to_peer map should be removed.
- let id_to_peer = self.id_to_peer.lock().unwrap();
- match id_to_peer.get(&funding_txo.to_channel_id()) {
+ // monitor event, this and the outpoint_to_peer map should be removed.
+ let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+ match outpoint_to_peer.get(funding_txo) {
Some(cp_id) => cp_id.clone(),
None => return,
}
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel =
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
chan
} else {
let update_actions = peer_state.monitor_update_blocked_actions
- .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+ .remove(channel_id).unwrap_or(Vec::new());
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(update_actions);
}
fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let peers_without_funded_channels =
self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
- .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ .ok_or_else(|| {
+ let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+ log_error!(logger, "{}", err_str);
+
+ APIError::ChannelUnavailable { err: err_str }
+ })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let is_only_peer_channel = peer_state.total_channel_count() == 1;
InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
&unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
- &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+ &self.logger, accept_0conf).map_err(|e| {
+ let err_str = e.to_string();
+ log_error!(logger, "{}", err_str);
+
+ APIError::ChannelUnavailable { err: err_str }
+ })
+ }
+ _ => {
+ let err_str = "No such channel awaiting to be accepted.".to_owned();
+ log_error!(logger, "{}", err_str);
+
+ Err(APIError::APIMisuseError { err: err_str })
}
- _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
}?;
if accept_0conf {
}
};
peer_state.pending_msg_events.push(send_msg_err_event);
- return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+ let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+ log_error!(logger, "{}", err_str);
+
+ return Err(APIError::APIMisuseError { err: err_str });
} else {
// If this peer already has some channels, a new channel won't increase our number of peers
// with unfunded channels, so as long as we aren't over the maximum number of unfunded
}
};
peer_state.pending_msg_events.push(send_msg_err_event);
- return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+ let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+ log_error!(logger, "{}", err_str);
+
+ return Err(APIError::APIMisuseError { err: err_str });
}
}
// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
if self.default_configuration.manually_accept_inbound_channels {
+ let channel_type = channel::channel_type_from_open_channel(
+ &msg, &peer_state.latest_features, &self.channel_type_features()
+ ).map_err(|e|
+ MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+ )?;
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::OpenChannelRequest {
temporary_channel_id: msg.temporary_channel_id.clone(),
counterparty_node_id: counterparty_node_id.clone(),
funding_satoshis: msg.funding_satoshis,
push_msat: msg.push_msat,
- channel_type: msg.channel_type.clone().unwrap(),
+ channel_type,
}, None));
peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
open_channel_msg: msg.clone(),
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, funding_msg_opt, monitor) =
+ let (mut chan, funding_msg_opt, monitor) =
match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
Ok(res) => res,
- Err((mut inbound_chan, err)) => {
+ Err((inbound_chan, err)) => {
// We've already removed this inbound channel from the map in `PeerState`
// above so at this point we just need to clean up any lingering entries
// concerning this channel as it is safe to do so.
- update_maps_on_chan_removal!(self, &inbound_chan.context);
- let user_id = inbound_chan.context.get_user_id();
- let shutdown_res = inbound_chan.context.force_shutdown(false);
- return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
- msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis()));
+ debug_assert!(matches!(err, ChannelError::Close(_)));
+ // Really we should be returning the channel_id the peer expects based
+ // on their funding info here, but they're horribly confused anyway, so
+ // there's not a lot we can do to save them.
+ return Err(convert_chan_phase_err!(self, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
},
}
},
- Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ Some(mut phase) => {
+ let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
+ let err = ChannelError::Close(err_msg);
+ return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
};
- match peer_state.channel_by_id.entry(chan.context.channel_id()) {
+ let funded_channel_id = chan.context.channel_id();
+
+ macro_rules! fail_chan { ($err: expr) => { {
+ // Note that at this point we've filled in the funding outpoint on our
+ // channel, but it's actually in conflict with another channel. Thus, if
+ // we call `convert_chan_phase_err` immediately (thus calling
+ // `update_maps_on_chan_removal`), we'll remove the existing channel
+ // from `outpoint_to_peer`. Thus, we must first unset the funding outpoint
+ // on the channel.
+ let err = ChannelError::Close($err.to_owned());
+ chan.unset_funding_info(msg.temporary_channel_id);
+ return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
+ } } }
+
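+ // From this point on, any failure path must go through `fail_chan!` so the funding
+ // info we just set on the channel is unset before the channel is torn down.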
+ match peer_state.channel_by_id.entry(funded_channel_id) {
hash_map::Entry::Occupied(_) => {
- Err(MsgHandleErrInternal::send_err_msg_no_close(
- "Already had channel with the new channel_id".to_owned(),
- chan.context.channel_id()
- ))
+ fail_chan!("Already had channel with the new channel_id");
},
hash_map::Entry::Vacant(e) => {
- let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
- match id_to_peer_lock.entry(chan.context.channel_id()) {
+ let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
+ match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
hash_map::Entry::Occupied(_) => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(
- "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
- chan.context.channel_id()))
+ fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
},
hash_map::Entry::Vacant(i_e) => {
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
if let Ok(persist_state) = monitor_res {
i_e.insert(chan.context.get_counterparty_node_id());
- mem::drop(id_to_peer_lock);
+ mem::drop(outpoint_to_peer_lock);
// There's no problem signing a counterparty's funding transaction if our monitor
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
} else {
let logger = WithChannelContext::from(&self.logger, &chan.context);
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
- let channel_id = match funding_msg_opt {
- Some(msg) => msg.channel_id,
- None => chan.context.channel_id(),
- };
- return Err(MsgHandleErrInternal::send_err_msg_no_close(
- "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
- channel_id));
+ fail_chan!("Duplicate funding outpoint");
}
}
}
let res =
chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
match res {
- Ok((chan, monitor)) => {
+ Ok((mut chan, monitor)) => {
if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
// We really should be able to insert here without doing a second
// lookup, but sadly rust stdlib doesn't currently allow keeping
Ok(())
} else {
let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+ // We weren't able to watch the channel to begin with, so no
+ // updates should be made on it. Previously, full_stack_target
+ // found an (unreachable) panic when the monitor update contained
+ // within `shutdown_finish` was applied.
+ chan.unset_funding_info(msg.channel_id);
return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
}
},
}
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
},
let context = phase.context_mut();
let logger = WithChannelContext::from(&self.logger, context);
log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
- self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
- finish_shutdown = Some(chan.context_mut().force_shutdown(false));
+ finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
},
}
} else {
msg: update
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
}
mem::drop(per_peer_state);
if let Some(shutdown_result) = shutdown_result {
Err(e) => PendingHTLCStatus::Fail(e)
};
let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
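+ // If the counterparty included a blinding point, this HTLC is part of a blinded path and
+ // any failure must be reported back as `invalid_onion_blinding` with an all-zero
+ // `sha256_of_onion`, so we don't reveal which check actually failed.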
+ if msg.blinding_point.is_some() {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ }
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
- let (htlc_source, forwarded_htlc_value) = {
+ let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
+ Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
+ funding_txo, msg.channel_id
+ );
+
Ok(())
}
let funding_txo = chan.context.get_funding_txo();
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
+ handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
}
Ok(())
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
- for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut push_forward_event = false;
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
match forward_htlcs.entry(scid) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
},
hash_map::Entry::Vacant(entry) => {
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
intercept_id
}, None));
entry.insert(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
+ channel_id: prev_channel_id,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
phantom_shared_secret: None,
push_forward_event = true;
}
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
}
/// the [`ChannelMonitorUpdate`] in question.
fn raa_monitor_updates_held(&self,
actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
- channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
) -> bool {
actions_blocking_raa_monitor_updates
- .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+ .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint,
+ channel_id,
counterparty_node_id,
})
})
if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+ chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
}
}
false
let funding_txo_opt = chan.context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
- &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+ &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
*counterparty_node_id)
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
if let Some(monitor_update) = monitor_update_opt {
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
- handle_new_monitor_update!(self, funding_txo, monitor_update,
+ handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
htlcs_to_fail
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+ for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
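+ // Monitor events don't carry any skimmed-fee information for the HTLC, so pass `None` for it here.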
+ self.claim_funds_internal(htlc_update.source, preimage,
+ htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
+ false, counterparty_node_id, funding_outpoint, channel_id);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+ let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_funding_outpoint) => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
// TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the id_to_peer map should be removed.
- let id_to_peer = self.id_to_peer.lock().unwrap();
- id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
+ // monitor event, this and the outpoint_to_peer map should be removed.
+ let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+ outpoint_to_peer.get(&funding_outpoint).cloned()
}
};
if let Some(counterparty_node_id) = counterparty_node_id_opt {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false));
+ failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
}
}
},
- MonitorEvent::Completed { funding_txo, monitor_update_id } => {
- self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
+ MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
+ self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
},
}
}
if let Some(monitor_update) = monitor_opt {
has_monitor_update = true;
- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
continue 'peer_loop;
}
/// attempted in every channel, or in the specifically provided channel.
///
/// [`ChannelSigner`]: crate::sign::ChannelSigner
- #[cfg(test)] // This is only implemented for one signer method, and should be private until we
- // actually finish implementing it fully.
+ #[cfg(async_signing)]
pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-
log_info!(logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, &chan.context);
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
} else { unreachable!(); }
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, update, channel_id,
});
}
self.finish_close_channel(failure);
///
/// # Privacy
///
- /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
- /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
- /// the node must be announced. Otherwise, there is no way to find a path to the introduction
- /// node in order to send the [`InvoiceRequest`].
+ /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
+ /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+ /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+ /// the node must be announced, otherwise, there is no way to find a path to the introduction
+ /// node in order to send the [`InvoiceRequest`].
+ ///
+ /// Also, uses a derived signing pubkey in the offer for recipient privacy.
///
/// # Limitations
///
/// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
/// reply path.
///
+ /// # Errors
+ ///
+ /// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
+ ///
/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
///
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
pub fn create_offer_builder(
&self, description: String
- ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
+ ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
let node_id = self.get_our_node_id();
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
- let path = self.create_one_hop_blinded_path();
- OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
+ let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let builder = OfferBuilder::deriving_signing_pubkey(
+ description, node_id, expanded_key, entropy, secp_ctx
+ )
.chain_hash(self.chain_hash)
- .path(path)
+ .path(path);
+
+ Ok(builder)
}
/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
///
/// # Privacy
///
- /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
- /// the introduction node and a derived payer id for payer privacy. As such, currently, the
- /// node must be announced. Otherwise, there is no way to find a path to the introduction node
- /// in order to send the [`Bolt12Invoice`].
+ /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
+ /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+ /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+ /// the node must be announced, otherwise, there is no way to find a path to the introduction
+ /// node in order to send the [`Bolt12Invoice`].
+ ///
+ /// Also, uses a derived payer id in the refund for payer privacy.
///
/// # Limitations
///
///
/// # Errors
///
- /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
- /// or if `amount_msats` is invalid.
+ /// Errors if:
+ /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+ /// - `amount_msats` is invalid, or
+ /// - the parameterized [`Router`] is unable to create a blinded path for the refund.
///
/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
///
/// [`Refund`]: crate::offers::refund::Refund
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
+ /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
pub fn create_refund_builder(
&self, description: String, amount_msats: u64, absolute_expiry: Duration,
payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
- let path = self.create_one_hop_blinded_path();
+ let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_payer_id(
description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
)?
///
/// # Errors
///
- /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
- /// or if the provided parameters are invalid for the offer.
+ /// Errors if:
+ /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+ /// - the provided parameters are invalid for the offer, or
+ /// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
+ /// request.
///
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
/// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
None => builder,
Some(payer_note) => builder.payer_note(payer_note),
};
-
let invoice_request = builder.build_and_sign()?;
- let reply_path = self.create_one_hop_blinded_path();
+ let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments
/// node meeting the aforementioned criteria, but there's no guarantee that they will be
/// received and no retries will be made.
///
+ /// # Errors
+ ///
+ /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
+ /// path for the invoice.
+ ///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
let expanded_key = &self.inbound_payment_key;
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
- let payment_paths = vec![
- self.create_one_hop_blinded_payment_path(payment_secret),
- ];
- #[cfg(not(feature = "no-std"))]
+ let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
+
+ #[cfg(feature = "std")]
let builder = refund.respond_using_derived_keys(
payment_paths, payment_hash, expanded_key, entropy
)?;
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = refund.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
- let reply_path = self.create_one_hop_blinded_path();
+ let reply_path = self.create_blinded_path()
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if refund.paths().is_empty() {
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
- /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
- /// node.
- fn create_one_hop_blinded_path(&self) -> BlindedPath {
+ /// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
+ ///
+ /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+ fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
+ let recipient = self.get_our_node_id();
let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
- BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
+
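+ // Only peers that advertise onion message support can act as the introduction node of a
+ // blinded path back to us, so restrict the candidate set accordingly.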
+ let peers = self.per_peer_state.read().unwrap()
+ .iter()
+ .filter(|(_, peer)| peer.lock().unwrap().latest_features.supports_onion_messages())
+ .map(|(node_id, _)| *node_id)
+ .collect::<Vec<_>>();
+
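+ // Use the first path the router gives us, treating an empty list as a failure.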
+ self.router
+ .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+ .and_then(|paths| paths.into_iter().next().ok_or(()))
}
- /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
- /// node.
- fn create_one_hop_blinded_payment_path(
- &self, payment_secret: PaymentSecret
- ) -> (BlindedPayInfo, BlindedPath) {
+ /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
+ /// [`Router::create_blinded_payment_paths`].
+ fn create_blinded_payment_paths(
+ &self, amount_msats: u64, payment_secret: PaymentSecret
+ ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
+ let first_hops = self.list_usable_channels();
let payee_node_id = self.get_our_node_id();
- let max_cltv_expiry = self.best_block.read().unwrap().height() + LATENCY_GRACE_PERIOD_BLOCKS;
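+ // Allow HTLCs received over these paths to set a cltv_expiry up to CLTV_FAR_FAR_AWAY blocks
+ // past the current height, plus a grace period for new blocks to propagate.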
+ let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
+ + LATENCY_GRACE_PERIOD_BLOCKS;
let payee_tlvs = ReceiveTlvs {
payment_secret,
payment_constraints: PaymentConstraints {
htlc_minimum_msat: 1,
},
};
- // TODO: Err for overflow?
- BlindedPath::one_hop_for_payment(
- payee_node_id, payee_tlvs, entropy_source, secp_ctx
- ).unwrap()
+ self.router.create_blinded_payment_paths(
+ payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
+ )
}
/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
/// [`Event`] being handled) completes, this should be called to restore the channel to normal
/// operation. It will double-check that nothing *else* is also blocking the same channel from
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
- fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId,
+ mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+
let logger = WithContext::from(
- &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+ &self.logger, Some(counterparty_node_id), Some(channel_id),
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(blocker) = completed_blocker.take() {
// Only do this on the first iteration of the loop.
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
- .get_mut(&channel_funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
blockers.retain(|iter| iter != &blocker);
}
}
if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- channel_funding_outpoint, counterparty_node_id) {
+ channel_funding_outpoint, channel_id, counterparty_node_id) {
// Check that, while holding the peer lock, we don't have anything else
// blocking monitor updates for this channel. If we do, release the monitor
// update(s) when those blockers complete.
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
- &channel_funding_outpoint.to_channel_id());
+ &channel_id);
break;
}
- if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
+ channel_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
- channel_funding_outpoint.to_channel_id());
- handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+ channel_id);
+ handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
if further_update_exists {
// If there are more `ChannelMonitorUpdate`s to process, restart at the
}
} else {
log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
- channel_funding_outpoint.to_channel_id());
+ channel_id);
}
}
}
for action in actions {
match action {
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint, counterparty_node_id
+ channel_funding_outpoint, channel_id, counterparty_node_id
} => {
- self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+ self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
}
}
}
update_maps_on_chan_removal!(self, &channel.context);
// It looks like our counterparty went on-chain or funding transaction was
// reorged out of the main chain. Close the channel.
- failed_channels.push(channel.context.force_shutdown(true));
+ let reason_message = format!("{}", reason);
+ failed_channels.push(channel.context.force_shutdown(true, reason));
if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- let reason_message = format!("{}", reason);
- self.issue_channel_close_events(&channel.context, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
outpoint: htlc.prev_funding_outpoint,
+ channel_id: htlc.prev_channel_id,
blinded_failure: htlc.forward_info.routing.blinded_failure(),
});
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
let logger = WithContext::from(
- &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+ &self.logger, None, Some(htlc.prev_channel_id)
);
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
- self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
- failed_channels.push(context.force_shutdown(false));
+ failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
false
});
// Note that we don't bother generating any events for pre-accept channels -
let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
- if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
- // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
- // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
- // worry about closing and removing them.
- debug_assert!(false);
- None
- }
+ if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).for_each(|chan| {
let logger = WithChannelContext::from(&self.logger, &chan.context);
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
&invoice_request
) {
- Ok(amount_msats) => Some(amount_msats),
+ Ok(amount_msats) => amount_msats,
Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
};
let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
return Some(OffersMessage::InvoiceError(error.into()));
},
};
- let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
- match self.create_inbound_payment(amount_msats, relative_expiry, None) {
- Ok((payment_hash, payment_secret)) if invoice_request.keys.is_some() => {
- let payment_paths = vec![
- self.create_one_hop_blinded_payment_path(payment_secret),
- ];
- #[cfg(not(feature = "no-std"))]
- let builder = invoice_request.respond_using_derived_keys(
- payment_paths, payment_hash
- );
- #[cfg(feature = "no-std")]
- let created_at = Duration::from_secs(
- self.highest_seen_timestamp.load(Ordering::Acquire) as u64
- );
- #[cfg(feature = "no-std")]
- let builder = invoice_request.respond_using_derived_keys_no_std(
- payment_paths, payment_hash, created_at
- );
- match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
- Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
- Err(error) => Some(OffersMessage::InvoiceError(error.into())),
- }
- },
- Ok((payment_hash, payment_secret)) => {
- let payment_paths = vec![
- self.create_one_hop_blinded_payment_path(payment_secret),
- ];
- #[cfg(not(feature = "no-std"))]
- let builder = invoice_request.respond_with(payment_paths, payment_hash);
- #[cfg(feature = "no-std")]
- let created_at = Duration::from_secs(
- self.highest_seen_timestamp.load(Ordering::Acquire) as u64
- );
- #[cfg(feature = "no-std")]
- let builder = invoice_request.respond_with_no_std(
- payment_paths, payment_hash, created_at
- );
- let response = builder.and_then(|builder| builder.allow_mpp().build())
- .map_err(|e| OffersMessage::InvoiceError(e.into()))
- .and_then(|invoice|
- match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
- Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
- Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed signing invoice".to_string())
- )),
- Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed invoice signature verification".to_string())
- )),
- });
- match response {
- Ok(invoice) => Some(invoice),
- Err(error) => Some(error),
- }
+ let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+ let (payment_hash, payment_secret) = match self.create_inbound_payment(
+ Some(amount_msats), relative_expiry, None
+ ) {
+ Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
+ Err(()) => {
+ let error = Bolt12SemanticError::InvalidAmount;
+ return Some(OffersMessage::InvoiceError(error.into()));
},
+ };
+
+ let payment_paths = match self.create_blinded_payment_paths(
+ amount_msats, payment_secret
+ ) {
+ Ok(payment_paths) => payment_paths,
Err(()) => {
- Some(OffersMessage::InvoiceError(Bolt12SemanticError::InvalidAmount.into()))
+ let error = Bolt12SemanticError::MissingPaths;
+ return Some(OffersMessage::InvoiceError(error.into()));
},
+ };
+
+ #[cfg(not(feature = "std"))]
+ let created_at = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
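+ // Without `std` we can't read the wall clock, so use the latest block time we've seen as the
+ // invoice creation time.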
+
+ if invoice_request.keys.is_some() {
+ #[cfg(feature = "std")]
+ let builder = invoice_request.respond_using_derived_keys(
+ payment_paths, payment_hash
+ );
+ #[cfg(not(feature = "std"))]
+ let builder = invoice_request.respond_using_derived_keys_no_std(
+ payment_paths, payment_hash, created_at
+ );
+ match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
+ Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
+ Err(error) => Some(OffersMessage::InvoiceError(error.into())),
+ }
+ } else {
+ #[cfg(feature = "std")]
+ let builder = invoice_request.respond_with(payment_paths, payment_hash);
+ #[cfg(not(feature = "std"))]
+ let builder = invoice_request.respond_with_no_std(
+ payment_paths, payment_hash, created_at
+ );
+ let response = builder.and_then(|builder| builder.allow_mpp().build())
+ .map_err(|e| OffersMessage::InvoiceError(e.into()))
+ .and_then(|invoice|
+ match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
+ Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
+ Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
+ InvoiceError::from_string("Failed signing invoice".to_string())
+ )),
+ Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
+ InvoiceError::from_string("Failed invoice signature verification".to_string())
+ )),
+ });
+ match response {
+ Ok(invoice) => Some(invoice),
+ Err(error) => Some(error),
+ }
}
},
OffersMessage::Invoice(invoice) => {
features.set_channel_type_optional();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
+ features.set_route_blinding_optional();
if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
features.set_anchors_zero_fee_htlc_tx_optional();
}
impl_writeable_tlv_based!(BlindedForward, {
(0, inbound_blinding_point, required),
+ (1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
});
impl_writeable_tlv_based_enum!(PendingHTLCRouting,
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(5, custom_tlvs, optional_vec),
+ (7, requires_blinded_error, (default_value, false)),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
);
impl_writeable_tlv_based_enum!(BlindedFailure,
- (0, FromIntroductionNode) => {}, ;
+ (0, FromIntroductionNode) => {},
+ (2, FromBlindedNode) => {}, ;
);
impl_writeable_tlv_based!(HTLCPreviousHopData, {
(4, htlc_id, required),
(6, incoming_packet_shared_secret, required),
(7, user_channel_id, option),
+ // Note that by the time we get past the required read for type 2 above, outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
});
impl Writeable for ClaimableHTLC {
(2, prev_short_channel_id, required),
(4, prev_htlc_id, required),
(6, prev_funding_outpoint, required),
+ // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
});
-impl_writeable_tlv_based_enum!(HTLCForwardInfo,
- (1, FailHTLC) => {
- (0, htlc_id, required),
- (2, err_packet, required),
- };
- (0, AddHTLC)
-);
+impl Writeable for HTLCForwardInfo {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ const FAIL_HTLC_VARIANT_ID: u8 = 1;
+ match self {
+ Self::AddHTLC(info) => {
+ 0u8.write(w)?;
+ info.write(w)?;
+ },
+ Self::FailHTLC { htlc_id, err_packet } => {
+ FAIL_HTLC_VARIANT_ID.write(w)?;
+ write_tlv_fields!(w, {
+ (0, htlc_id, required),
+ (2, err_packet, required),
+ });
+ },
+ Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ // Since this variant was added in 0.0.119, write this as `::FailHTLC` with an empty error
+ // packet so older versions have something to fail back with, but serialize the real data as
+ // optional TLVs for the benefit of newer versions.
+ FAIL_HTLC_VARIANT_ID.write(w)?;
+ let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+ write_tlv_fields!(w, {
+ (0, htlc_id, required),
+ (1, failure_code, required),
+ (2, dummy_err_packet, required),
+ (3, sha256_of_onion, required),
+ });
+ },
+ }
+ Ok(())
+ }
+}
+
+impl Readable for HTLCForwardInfo {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(r)?;
+ Ok(match id {
+ 0 => Self::AddHTLC(Readable::read(r)?),
+ 1 => {
+ _init_and_read_len_prefixed_tlv_fields!(r, {
+ (0, htlc_id, required),
+ (1, malformed_htlc_failure_code, option),
+ (2, err_packet, required),
+ (3, sha256_of_onion, option),
+ });
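+ // Newer versions write the failure code as an odd TLV (type 1) only for the
+ // `FailMalformedHTLC` variant, so its presence tells us which variant to reconstruct.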
+ if let Some(failure_code) = malformed_htlc_failure_code {
+ Self::FailMalformedHTLC {
+ htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+ failure_code,
+ sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
+ }
+ } else {
+ Self::FailHTLC {
+ htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+ err_packet: _init_tlv_based_struct_field!(err_packet, required),
+ }
+ }
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ })
+ }
+}
impl_writeable_tlv_based!(PendingInboundPayment, {
(0, payment_secret, required),
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
+ let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
for _ in 0..channel_count {
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
let logger = WithChannelContext::from(&args.logger, &channel.context);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
- let mut shutdown_result = channel.context.force_shutdown(true);
+ let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
- if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, channel_id, update
});
}
failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
reason: ClosureReason::OutdatedChannelManager,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+ channel_funding_txo: channel.context.get_funding_txo(),
}, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.context.is_funding_broadcast() {
- id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
+ if let Some(funding_txo) = channel.context.get_funding_txo() {
+ outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
}
match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
hash_map::Entry::Occupied(mut entry) => {
// If we were persisted and shut down while the initial ChannelMonitor persistence
// was in-progress, we never broadcasted the funding transaction and can still
// safely discard the channel.
- let _ = channel.context.force_shutdown(false);
+ let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+ channel_funding_txo: channel.context.get_funding_txo(),
}, None));
} else {
log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
for (funding_txo, monitor) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
let logger = WithChannelMonitor::from(&args.logger, monitor);
+ let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
- &funding_txo.to_channel_id());
+ &channel_id);
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
+ counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ channel_id: Some(monitor.channel_id()),
};
- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+ close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
}
}
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
for update in $chan_in_flight_upds.iter() {
log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
- update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
+ update.update_id, $channel_info_log, &$monitor.channel_id());
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: $funding_txo,
+ channel_id: $monitor.channel_id(),
update: update.clone(),
});
}
pending_background_events.push(
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id: $counterparty_node_id,
- channel_id: $funding_txo.to_channel_id(),
+ channel_id: $monitor.channel_id(),
});
}
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
- let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
+ let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
+ let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
// Now that we've removed all the in-flight monitor updates for channels that are
// still open, we need to replay any monitor updates that are for closed channels,
funding_txo, monitor, peer_state, logger, "closed ");
} else {
log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(logger, " The ChannelMonitor for channel {} is missing.",
- &funding_txo.to_channel_id());
+ log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
+ channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
// We only rebuild the pending payments map if we were most recently serialized by
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
- let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+ let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
if counterparty_opt.is_none() {
let logger = WithChannelMonitor::from(&args.logger, monitor);
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
false
} else { true }
} else { true }
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
let compl_action =
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: monitor.get_funding_txo().0,
+ channel_id: monitor.channel_id(),
counterparty_node_id: path.hops[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
// channel_id -> peer map entry).
counterparty_opt.is_none(),
counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
- monitor.get_funding_txo().0))
+ monitor.get_funding_txo().0, monitor.channel_id()))
} else { None }
} else {
// If it was an outbound payment, we've handled it above - if a preimage
// this channel as well. On the flip side, there's no harm in restarting
// without the new monitor persisted - we'll end up right back here on
// restart.
- let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
- if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id){
+ let previous_channel_id = claimable_htlc.prev_hop.channel_id;
+ if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
- Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+ Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
} = action {
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ let channel_id = blocked_channel_id;
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
- blocked_channel_outpoint.to_channel_id());
+ channel_id);
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
- .entry(blocked_channel_outpoint.to_channel_id())
+ .entry(*channel_id)
.or_insert_with(Vec::new).push(blocking_action.clone());
} else {
// If the channel we were blocking has closed, we don't need to
forward_htlcs: Mutex::new(forward_htlcs),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
- id_to_peer: Mutex::new(id_to_peer),
+ outpoint_to_peer: Mutex::new(outpoint_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+ for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
- channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, true, downstream_node_id, downstream_funding);
+ channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
+ downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::ChannelId;
- use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
+ use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{self, ErrorAction};
use crate::ln::msgs::ChannelMessageHandler;
+ use crate::prelude::*;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
+ use crate::util::ser::Writeable;
use crate::util::test_utils;
use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
use crate::sign::EntropySource;
}
#[test]
- fn test_id_to_peer_coverage() {
- // Test that the `ChannelManager:id_to_peer` contains channels which have been assigned
+ fn test_outpoint_to_peer_coverage() {
+ // Test that the `ChannelManager::outpoint_to_peer` map contains channels which have been assigned
// a `channel_id` (i.e. have had the funding tx created), and that they are removed once
// the channel is successfully closed.
let chanmon_cfgs = create_chanmon_cfgs(2);
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
- let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
let channel_id = ChannelId::from_bytes(tx.txid().to_byte_array());
{
- // Ensure that the `id_to_peer` map is empty until either party has received the
+ // Ensure that the `outpoint_to_peer` map is empty until either party has received the
// funding transaction, and has the real `channel_id`.
- assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
}
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
{
- // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as
+ // Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon
// as it has the funding transaction.
- let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
- assert!(nodes_0_lock.contains_key(&channel_id));
+ assert!(nodes_0_lock.contains_key(&funding_output));
}
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
{
- let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
- assert!(nodes_0_lock.contains_key(&channel_id));
+ assert!(nodes_0_lock.contains_key(&funding_output));
}
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
{
- // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
- // as it has the funding transaction.
- let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ // Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as
+ // soon as it has the funding transaction.
+ let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
- assert!(nodes_1_lock.contains_key(&channel_id));
+ assert!(nodes_1_lock.contains_key(&funding_output));
}
check_added_monitors!(nodes[1], 1);
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
{
- // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+ // Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the
// channel can be fully closed by both parties (i.e. no outstanding htlcs exist, the
// fee for the closing transaction has been negotiated and the parties have the other
// party's signature for the fee negotiated closing transaction.)
- let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
- assert!(nodes_0_lock.contains_key(&channel_id));
+ assert!(nodes_0_lock.contains_key(&funding_output));
}
{
// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
- // kept in the `nodes[1]`'s `id_to_peer` map.
- let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ // kept in `nodes[1]`'s `outpoint_to_peer` map.
+ let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
- assert!(nodes_1_lock.contains_key(&channel_id));
+ assert!(nodes_1_lock.contains_key(&funding_output));
}
nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
// `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
// therefore has all it needs to fully close the channel (both signatures for the
// closing transaction).
- // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+ // Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be
// fully closed by `nodes[0]`.
- assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
- // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
+ // Assert that the channel is still in `nodes[1]`'s `outpoint_to_peer` map, as `nodes[1]`
// doesn't have `nodes[0]`'s signature for the closing transaction yet.
- let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
- assert!(nodes_1_lock.contains_key(&channel_id));
+ assert!(nodes_1_lock.contains_key(&funding_output));
}
let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
{
- // Assert that the channel has now been removed from both parties `id_to_peer` map once
+ // Assert that the channel has now been removed from both parties' `outpoint_to_peer` maps once
// they both have everything required to fully close the channel.
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
let sender_intended_amt_msat = 100;
let extra_fee_msat = 10;
let hop_data = msgs::InboundOnionPayload::Receive {
- amt_msat: 100,
- outgoing_cltv_value: 42,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
let current_height: u32 = node[0].node.best_block.read().unwrap().height();
- if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
+ if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
current_height, node[0].node.default_configuration.accept_mpp_keysend)
// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
- amt_msat: 100,
- outgoing_cltv_value: 42,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
let current_height: u32 = node[0].node.best_block.read().unwrap().height();
let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
- amt_msat: 100,
- outgoing_cltv_value: 22,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 22,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
check_spends!(txn[0], funding_tx);
}
}
+
+ #[test]
+ fn test_malformed_forward_htlcs_ser() {
+ // Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
+ let chanmon_cfg = create_chanmon_cfgs(1);
+ let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+ let persister;
+ let chain_monitor;
+ let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
+ let deserialized_chanmgr;
+ let mut nodes = create_network(1, &node_cfg, &chanmgrs);
+
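+ // Helpers building the two `HTLCForwardInfo` failure variants we want to round-trip.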
+ let dummy_failed_htlc = |htlc_id| {
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
+ };
+ let dummy_malformed_htlc = |htlc_id| {
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
+ };
+
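+ // Interleave the two variants so each SCID's queue mixes `FailHTLC` and `FailMalformedHTLC` entries in a different order.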
+ let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+ if htlc_id % 2 == 0 {
+ dummy_failed_htlc(htlc_id)
+ } else {
+ dummy_malformed_htlc(htlc_id)
+ }
+ }).collect();
+
+ let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+ if htlc_id % 2 == 1 {
+ dummy_failed_htlc(htlc_id)
+ } else {
+ dummy_malformed_htlc(htlc_id)
+ }
+ }).collect();
+
+
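+ // Stash the dummy failures in the ChannelManager's `forward_htlcs` map under two arbitrary fake SCIDs.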
+ let (scid_1, scid_2) = (42, 43);
+ let mut forward_htlcs = HashMap::new();
+ forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
+ forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
+
+ let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+ *chanmgr_fwd_htlcs = forward_htlcs.clone();
+ core::mem::drop(chanmgr_fwd_htlcs);
+
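+ // Serialize the ChannelManager, reload it, and check that `forward_htlcs` survives the round trip intact.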
+ reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
+
+ let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+ for scid in [scid_1, scid_2].iter() {
+ let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
+ assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
+ }
+ assert!(deserialized_fwd_htlcs.is_empty());
+ core::mem::drop(deserialized_fwd_htlcs);
+
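+ // The reloaded node should still report that it has HTLCs pending to be forwarded.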
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ }
}
#[cfg(ldk_bench)]
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::{Block, Transaction, TxOut};
+ use bitcoin::{Transaction, TxOut};
use crate::sync::{Arc, Mutex, RwLock};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
+ let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
let mut config: UserConfig = Default::default();
config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);