pub(crate) struct HTLCPreviousHopData {
// Note that this may be an outbound SCID alias for the associated channel.
short_channel_id: u64,
+ user_channel_id: Option<u128>,
htlc_id: u64,
incoming_packet_shared_secret: [u8; 32],
phantom_shared_secret: Option<[u8; 32]>,
counterparty_skimmed_fee_msat: Option<u64>,
}
+impl From<&ClaimableHTLC> for events::ClaimedHTLC {
+ fn from(val: &ClaimableHTLC) -> Self {
+ events::ClaimedHTLC {
+ channel_id: val.prev_hop.outpoint.to_channel_id(),
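+			// `user_channel_id` may be `None` for HTLCs read from data serialized by prior LDK
+			// versions that did not track it; fall back to 0 in that case.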
+ user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
+ cltv_expiry: val.cltv_expiry,
+ value_msat: val.value,
+ }
+ }
+}
+
/// A payment identifier used to uniquely identify a payment to LDK.
///
/// This is not exported to bindings users as we just use [u8; 32] directly
amount_msat: u64,
payment_purpose: events::PaymentPurpose,
receiver_node_id: PublicKey,
+ htlcs: Vec<events::ClaimedHTLC>,
+ sender_intended_value: Option<u64>,
}
impl_writeable_tlv_based!(ClaimingPayment, {
(0, amount_msat, required),
(2, payment_purpose, required),
(4, receiver_node_id, required),
+ (5, htlcs, optional_vec),
+ (7, sender_intended_value, option),
});
struct ClaimablePayment {
/// been assigned a `channel_id`, the entry in this map is removed and one is created in
/// `channel_by_id`.
pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<Signer>>,
+ /// `temporary_channel_id` -> `InboundChannelRequest`.
+ ///
+ /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
+ /// the peer is the counterparty. If the channel is accepted, then the entry in this table is
+	/// removed, and an `InboundV1Channel` is created and placed in the `inbound_v1_channel_by_id`
+	/// table. If the channel is rejected, then the entry is simply removed.
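+	///
+	/// If the channel is neither accepted nor rejected within
+	/// [`UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS`] timer ticks, the request is abandoned and
+	/// the entry removed.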
+ pub(super) inbound_channel_request_by_id: HashMap<[u8; 32], InboundChannelRequest>,
/// The latest `InitFeatures` we heard from the peer.
latest_features: InitFeatures,
/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
fn total_channel_count(&self) -> usize {
self.channel_by_id.len() +
self.outbound_v1_channel_by_id.len() +
- self.inbound_v1_channel_by_id.len()
+ self.inbound_v1_channel_by_id.len() +
+ self.inbound_channel_request_by_id.len()
}
// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
self.channel_by_id.contains_key(channel_id) ||
self.outbound_v1_channel_by_id.contains_key(channel_id) ||
- self.inbound_v1_channel_by_id.contains_key(channel_id)
+ self.inbound_v1_channel_by_id.contains_key(channel_id) ||
+ self.inbound_channel_request_by_id.contains_key(channel_id)
}
}
+/// A not-yet-accepted inbound (from counterparty) channel. Once
+/// accepted, the parameters will be used to construct a channel.
+pub(super) struct InboundChannelRequest {
+ /// The original OpenChannel message.
+ pub open_channel_msg: msgs::OpenChannel,
+ /// The number of ticks remaining before the request expires.
+ pub ticks_remaining: i32,
+}
+
+/// The number of ticks that may elapse while we're waiting for an unaccepted inbound channel to be
+/// accepted. An unaccepted channel that exceeds this limit will be abandoned.
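+/// Each tick corresponds to a call to [`ChannelManager::timer_tick_occurred`].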
+const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
+
/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
/// actually ours and not some duplicate HTLC sent to us by a node along the route.
///
/// could be in the middle of being processed without the direct mutex held.
///
/// See `ChannelManager` struct-level documentation for lock order requirements.
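+	//
+	// The `pub(crate)` variant below is used under test configurations so functional tests can
+	// access the pending event queue directly.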
+ #[cfg(not(any(test, feature = "_test_utils")))]
pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+
/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
pending_events_processor: AtomicBool,
}
/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
+///
+/// Balances of a channel are available through [`ChainMonitor::get_claimable_balances`] and
+/// [`ChannelMonitor::get_claimable_balances`], calculated with respect to the corresponding on-chain
+/// transactions.
+///
+/// [`ChainMonitor::get_claimable_balances`]: crate::chain::chainmonitor::ChainMonitor::get_claimable_balances
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
///
/// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
pub unspendable_punishment_reserve: Option<u64>,
- /// The `user_channel_id` passed in to create_channel, or a random value if the channel was
- /// inbound. This may be zero for inbound channels serialized with LDK versions prior to
- /// 0.0.113.
+	/// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+	/// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if the
+	/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
+	/// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects
+	/// serialized with LDK versions prior to 0.0.113.
+ ///
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
pub user_channel_id: u128,
/// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
/// which is applied to commitment and HTLC transactions.
///
/// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
pub feerate_sat_per_1000_weight: Option<u32>,
- /// Our total balance. This is the amount we would get if we close the channel.
- /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
- /// amount is not likely to be recoverable on close.
- ///
- /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
- /// balance is not available for inclusion in new outbound HTLCs). This further does not include
- /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
- /// This does not consider any on-chain fees.
- ///
- /// See also [`ChannelDetails::outbound_capacity_msat`]
- pub balance_msat: u64,
/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new outbound HTLCs). This further does not include any pending
/// outgoing HTLCs which are awaiting some other resolution to be sent.
///
- /// See also [`ChannelDetails::balance_msat`]
- ///
/// This value is not exact. Due to various in-flight changes, feerate changes, and our
/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
/// should be able to spend nearly this amount.
/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
/// to use a limit as close as possible to the HTLC limit we can currently send.
///
- /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
- /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
+ /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`] and
+ /// [`ChannelDetails::outbound_capacity_msat`].
pub next_outbound_htlc_limit_msat: u64,
/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
channel_value_satoshis: context.get_value_satoshis(),
feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()),
unspendable_punishment_reserve: to_self_reserve_satoshis,
- balance_msat: balance.balance_msat,
inbound_capacity_msat: balance.inbound_capacity_msat,
outbound_capacity_msat: balance.outbound_capacity_msat,
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
self.finish_force_close_channel(chan.context.force_shutdown(false));
// Unfunded channel has no update
(None, chan.context.get_counterparty_node_id())
+ } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
+ log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+ // N.B. that we don't send any channel close event here: we
+ // don't have a user_channel_id, and we never sent any opening
+ // events anyway.
+ (None, *peer_node_id)
} else {
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
}
short_channel_id, amt_to_forward, outgoing_cltv_value
}, ..
} => {
- let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx,
+ let next_packet_pk = onion_utils::next_hop_pubkey(&self.secp_ctx,
msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
- (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk))
+ (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_packet_pk))
},
// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
// inbound channel's state.
if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: payment.prev_short_channel_id,
+ user_channel_id: Some(payment.prev_user_channel_id),
outpoint: payment.prev_funding_outpoint,
htlc_id: payment.prev_htlc_id,
incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
let claimable_htlc = ClaimableHTLC {
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
);
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: $htlc.prev_hop.short_channel_id,
+ user_channel_id: $htlc.prev_hop.user_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
chan_id: &[u8; 32],
chan_context: &mut ChannelContext<<SP::Target as SignerProvider>::Signer>,
unfunded_chan_context: &mut UnfundedChannelContext,
+ pending_msg_events: &mut Vec<MessageSendEvent>,
| {
chan_context.maybe_expire_prev_config();
if unfunded_chan_context.should_expire_unfunded_channel() {
- log_error!(self.logger, "Force-closing pending outbound channel {} for not establishing in a timely manner", log_bytes!(&chan_id[..]));
+ log_error!(self.logger,
+ "Force-closing pending channel with ID {} for not establishing in a timely manner",
+ log_bytes!(&chan_id[..]));
update_maps_on_chan_removal!(self, &chan_context);
self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
self.finish_force_close_channel(chan_context.force_shutdown(false));
+ pending_msg_events.push(MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id: *chan_id,
+ data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
+ },
+ },
+ });
false
} else {
true
}
};
- peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
- peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+ peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
+ chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
+ peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
+ chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
+
+ for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
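+				// Decrement first, then compare, so a request is abandoned once
+				// `UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS` ticks have elapsed. The entry itself
+				// is removed by the `retain` below; here we only queue the error message to send.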
+				if { req.ticks_remaining -= 1; req.ticks_remaining } <= 0 {
+					log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not being accepted in a timely manner", log_bytes!(&chan_id[..]));
+ peer_state.pending_msg_events.push(
+ events::MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() }
+ },
+ }
+ );
+ }
+ }
+ peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0);
if peer_state.ok_to_remove(true) {
pending_peers_awaiting_removal.push(counterparty_node_id);
&self.pending_events, &self.logger)
{ self.push_pending_forwards_ev(); }
},
- HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint, .. }) => {
log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
}
}
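+		// Each HTLC in an MPP payment is expected to carry the same sender-intended total, so the
+		// first HTLC's `total_msat` is used as the sender-intended value for the whole payment.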
+ let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
+ let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
- payment_purpose: payment.purpose, receiver_node_id,
+ payment_purpose: payment.purpose, receiver_node_id, htlcs, sender_intended_value
});
if dup_purpose.is_some() {
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
}
- fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+ fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
"We don't support claim_htlc claims during startup - monitors may not be available yet");
- self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
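+				// Hold the RAA-triggered monitor update on the channel the fulfill arrived on until
+				// the user has handled the resulting payment event, so the event isn't lost if we
+				// restart before it is processed.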
+ let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint: next_channel_outpoint,
+ counterparty_node_id: path.hops[0].pubkey,
+ };
+ self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
+ session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
+ &self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
fee_earned_msat,
claim_from_onchain_tx: from_onchain,
prev_channel_id: Some(prev_outpoint.to_channel_id()),
- next_channel_id: Some(next_channel_id),
+ next_channel_id: Some(next_channel_outpoint.to_channel_id()),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: None,
match action {
MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
- if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
+ if let Some(ClaimingPayment {
+ amount_msat,
+ payment_purpose: purpose,
+ receiver_node_id,
+ htlcs,
+ sender_intended_value: sender_intended_total_msat,
+ }) = payment {
self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
- payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
+ payment_hash,
+ purpose,
+ amount_msat,
+ receiver_node_id: Some(receiver_node_id),
+ htlcs,
+ sender_intended_total_msat,
}, None));
}
},
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let is_only_peer_channel = peer_state.total_channel_count() == 1;
- match peer_state.inbound_v1_channel_by_id.entry(temporary_channel_id.clone()) {
- hash_map::Entry::Occupied(mut channel) => {
- if !channel.get().is_awaiting_accept() {
- return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
+
+	// Find (and remove) the channel in the unaccepted table. If it's not there, something weird is
+	// happening, so return an error. N.B. that we create the channel with an outbound SCID alias of
+	// zero so that we can delay allocating the SCID until after we're sure that the checks below
+	// will succeed.
+ let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
+ Some(unaccepted_channel) => {
+ let best_block_height = self.best_block.read().unwrap().height();
+ InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
+ counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
+ &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
+ &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+ }
+ _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
+ }?;
+
+ if accept_0conf {
+ // This should have been correctly configured by the call to InboundV1Channel::new.
+ debug_assert!(channel.context.minimum_depth().unwrap() == 0);
+ } else if channel.context.get_channel_type().requires_zero_conf() {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
}
- if accept_0conf {
- channel.get_mut().set_0conf();
- } else if channel.get().context.get_channel_type().requires_zero_conf() {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.get().context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
- }
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let _ = remove_channel!(self, channel);
- return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
- } else {
- // If this peer already has some channels, a new channel won't increase our number of peers
- // with unfunded channels, so as long as we aren't over the maximum number of unfunded
- // channels per-peer we can accept channels from a peer with existing ones.
- if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.get().context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
- }
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let _ = remove_channel!(self, channel);
- return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+ } else {
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+ // channels per-peer we can accept channels from a peer with existing ones.
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
}
- }
-
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.get().context.get_counterparty_node_id(),
- msg: channel.get_mut().accept_inbound_channel(user_channel_id),
- });
- }
- hash_map::Entry::Vacant(_) => {
- return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) });
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
}
}
+
+ // Now that we know we have a channel, assign an outbound SCID alias.
+ let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+ channel.context.set_outbound_scid_alias(outbound_scid_alias);
+
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg: channel.accept_inbound_channel(),
+ });
+
+ peer_state.inbound_v1_channel_by_id.insert(temporary_channel_id.clone(), channel);
+
Ok(())
}
num_unfunded_channels += 1;
}
}
- num_unfunded_channels
+ num_unfunded_channels + peer.inbound_channel_request_by_id.len()
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
}
- let mut random_bytes = [0u8; 16];
- random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
- let user_channel_id = u128::from_be_bytes(random_bytes);
- let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
-
// Get the number of peers with channels, but without funded ones. We don't care too much
// about peers that never open a channel, so we filter by peers that have at least one
// channel, and then limit the number of those with unfunded channels.
msg.temporary_channel_id.clone()));
}
+ let channel_id = msg.temporary_channel_id;
+ let channel_exists = peer_state.has_channel(&channel_id);
+ if channel_exists {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()));
+ }
+
+ // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
+ if self.default_configuration.manually_accept_inbound_channels {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push_back((events::Event::OpenChannelRequest {
+ temporary_channel_id: msg.temporary_channel_id.clone(),
+ counterparty_node_id: counterparty_node_id.clone(),
+ funding_satoshis: msg.funding_satoshis,
+ push_msat: msg.push_msat,
+ channel_type: msg.channel_type.clone().unwrap(),
+ }, None));
+ peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
+ open_channel_msg: msg.clone(),
+ ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
+ });
+ return Ok(());
+ }
+
+ // Otherwise create the channel right now.
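+		// Without manual acceptance the inbound channel simply gets a random `user_channel_id`,
+		// which is what is later reported in `ChannelDetails::user_channel_id`.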
+ let mut random_bytes = [0u8; 16];
+ random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
+ let user_channel_id = u128::from_be_bytes(random_bytes);
let mut channel = match InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
- &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
+ &self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false)
{
Err(e) => {
- self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
},
Ok(res) => res
};
- let channel_id = channel.context.channel_id();
- let channel_exists = peer_state.has_channel(&channel_id);
- if channel_exists {
- self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
- return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
- } else {
- if !self.default_configuration.manually_accept_inbound_channels {
- let channel_type = channel.context.get_channel_type();
- if channel_type.requires_zero_conf() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
- }
- if channel_type.requires_anchors_zero_fee_htlc_tx() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
- }
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: counterparty_node_id.clone(),
- msg: channel.accept_inbound_channel(user_channel_id),
- });
- } else {
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push_back((events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
- counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
- push_msat: msg.push_msat,
- channel_type: channel.context.get_channel_type().clone(),
- }, None));
- }
- peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
+
+ let channel_type = channel.context.get_channel_type();
+ if channel_type.requires_zero_conf() {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ }
+ if channel_type.requires_anchors_zero_fee_htlc_tx() {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
}
+
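+		// All checks have passed; only now do we allocate an outbound SCID alias, so we never have
+		// to undo the allocation if one of the checks above fails.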
+ let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+ channel.context.set_outbound_scid_alias(outbound_scid_alias);
+
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: counterparty_node_id.clone(),
+ msg: channel.accept_inbound_channel(),
+ });
+ peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
Ok(())
}
}
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
+ let funding_txo;
let (htlc_source, forwarded_htlc_value) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
+ let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
+ funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded");
+ res
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
Ok(())
}
log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let funding_txo = chan.get().context.get_funding_txo();
- let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan);
+ let funding_txo_opt = chan.get().context.get_funding_txo();
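+					// If a pending claim or event is already blocking monitor updates on this
+					// channel, tell the channel so the RAA-triggered update is held rather than
+					// released immediately.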
+ let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
+ self.raa_monitor_updates_held(
+ &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+ *counterparty_node_id)
+ } else { false };
+ let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
+ chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan);
let res = if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+ let funding_txo = funding_txo_opt
+ .expect("Funding outpoint must have been set for RAA handling to succeed");
+ handle_new_monitor_update!(self, funding_txo, monitor_update,
peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
} else { Ok(()) };
(htlcs_to_fail, res)
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+ self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: htlc.prev_short_channel_id,
+ user_channel_id: Some(htlc.prev_user_channel_id),
htlc_id: htlc.prev_htlc_id,
incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
false
});
+ // Note that we don't bother generating any events for pre-accept channels -
+ // they're not considered "channels" yet from the PoV of our events interface.
+ peer_state.inbound_channel_request_by_id.clear();
pending_msg_events.retain(|msg| {
match msg {
// V1 Channel Establishment
channel_by_id: HashMap::new(),
outbound_v1_channel_by_id: HashMap::new(),
inbound_v1_channel_by_id: HashMap::new(),
+ inbound_channel_request_by_id: HashMap::new(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ match &msg.data as &str {
+ "cannot co-op close channel w/ active htlcs"|
+ "link failed to shutdown" =>
+ {
+ // LND hasn't properly handled shutdown messages ever, and force-closes any time we
+ // send one while HTLCs are still present. The issue is tracked at
+ // https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches
+ // to fix it but none so far have managed to land upstream. The issue appears to be
+ // very low priority for the LND team despite being marked "P1".
+ // We're not going to bother handling this in a sensible way, instead simply
+ // repeating the Shutdown message on repeat until morale improves.
+ if msg.channel_id != [0; 32] {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return; }
+ let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+ if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) {
+ if let Some(msg) = chan.get_outbound_shutdown() {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
+ peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: *counterparty_node_id,
+ action: msgs::ErrorAction::SendWarningMessage {
+ msg: msgs::WarningMessage {
+ channel_id: msg.channel_id,
+ data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+ },
+ log_level: Level::Trace,
+ }
+ });
+ }
+ }
+ return;
+ }
+ _ => {}
+ }
+
if msg.channel_id == [0; 32] {
let channel_ids: Vec<[u8; 32]> = {
let per_peer_state = self.per_peer_state.read().unwrap();
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ // Note that we don't bother generating any events for pre-accept channels -
+ // they're not considered "channels" yet from the PoV of our events interface.
+ peer_state.inbound_channel_request_by_id.clear();
peer_state.channel_by_id.keys().cloned()
.chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
.chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
(10, self.channel_value_satoshis, required),
(12, self.unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, self.balance_msat, required),
+ (16, self.next_outbound_htlc_limit_msat, required), // Forwards compatibility for removed balance_msat field.
(18, self.outbound_capacity_msat, required),
(19, self.next_outbound_htlc_limit_msat, required),
(20, self.inbound_capacity_msat, required),
impl Readable for ChannelDetails {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- _init_and_read_tlv_fields!(reader, {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
(1, inbound_scid_alias, option),
(2, channel_id, required),
(3, channel_type, option),
(10, channel_value_satoshis, required),
(12, unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, balance_msat, required),
+ (16, _balance_msat, option), // Backwards compatibility for removed balance_msat field.
(18, outbound_capacity_msat, required),
// Note that by the time we get past the required read above, outbound_capacity_msat will be
// filled in, so we can safely unwrap it here.
let user_channel_id = user_channel_id_low as u128 +
((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
+ let _balance_msat: Option<u64> = _balance_msat;
+
Ok(Self {
inbound_scid_alias,
channel_id: channel_id.0.unwrap(),
channel_value_satoshis: channel_value_satoshis.0.unwrap(),
unspendable_punishment_reserve,
user_channel_id,
- balance_msat: balance_msat.0.unwrap(),
outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
(1, phantom_shared_secret, option),
(2, outpoint, required),
(4, htlc_id, required),
- (6, incoming_packet_shared_secret, required)
+ (6, incoming_packet_shared_secret, required),
+ (7, user_channel_id, option),
});
impl Writeable for ClaimableHTLC {
impl Readable for ClaimableHTLC {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- _init_and_read_tlv_fields!(reader, {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
(0, prev_hop, required),
(1, total_msat, option),
(2, value_ser, required),
channel_by_id,
outbound_v1_channel_by_id: HashMap::new(),
inbound_v1_channel_by_id: HashMap::new(),
+ inbound_channel_request_by_id: HashMap::new(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
// generating a `PaymentPathSuccessful` event but regenerating
// it and the `PaymentSent` on every restart until the
// `ChannelMonitor` is removed.
- pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
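+						// As in `claim_funds_internal`, release the RAA monitor update for the
+						// channel whose monitor holds the preimage only once the payment event has
+						// been processed.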
+ let compl_action =
+ EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint: monitor.get_funding_txo().0,
+ counterparty_node_id: path.hops[0].pubkey,
+ };
+ pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
+ path, false, compl_action, &pending_events, &args.logger);
pending_events_read = pending_events.into_inner().unwrap();
}
},
// downstream chan is closed (because we don't have a
// channel_id -> peer map entry).
counterparty_opt.is_none(),
- monitor.get_funding_txo().0.to_channel_id()))
+ monitor.get_funding_txo().0))
} else { None }
} else {
// If it was an outbound payment, we've handled it above - if a preimage
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = Some(phantom_pubkey)
}
- for claimable_htlc in payment.htlcs {
+ for claimable_htlc in &payment.htlcs {
claimable_amt_msat += claimable_htlc.value;
// Add a holding-cell claim of the payment to the Channel, which should be
payment_hash,
purpose: payment.purpose,
amount_msat: claimable_amt_msat,
+ htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
+ sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
}, None));
}
}
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
+ for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, downstream_chan_id);
+ downstream_closed, downstream_funding);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
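	// PaymentSent is expected as soon as the first fulfill is handled; the events checked below
	// then only contain the per-path PaymentPathSuccessful events.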
+ expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
// Note that successful MPP payments will generate a single PaymentSent event upon the first
// path's success and a PaymentPathSuccessful event for each path's success.
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 3);
+ assert_eq!(events.len(), 2);
match events[0] {
- Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
- assert_eq!(Some(payment_id), *id);
- assert_eq!(payment_preimage, *preimage);
- assert_eq!(our_payment_hash, *hash);
- },
- _ => panic!("Unexpected event"),
- }
- match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
},
_ => panic!("Unexpected event"),
}
- match events[2] {
+ match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ // Since nodes[1] should not have accepted the channel, it should
+ // not have generated any events.
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
#[test]