// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
-use crate::ln::features::InvoiceFeatures;
+use crate::ln::features::Bolt11InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
#[cfg(test)]
use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
use crate::ln::wire::Encode;
use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
payment_metadata: Option<Vec<u8>>,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
phantom_shared_secret: Option<[u8; 32]>,
+ /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+ custom_tlvs: Vec<(u64, Vec<u8>)>,
},
ReceiveKeysend {
/// This was added in 0.0.116 and will break deserialization on downgrades.
payment_preimage: PaymentPreimage,
payment_metadata: Option<Vec<u8>>,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+ /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+ custom_tlvs: Vec<(u64, Vec<u8>)>,
},
}
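// Illustrative note (a sketch, not part of this diff): `custom_tlvs` holds arbitrary
// sender-provided `(type, value)` pairs, e.g. `vec![(65537, vec![0x01, 0x02])]`. Per the claim
// logic below, payments carrying *even* custom TLV type numbers (required-to-understand) are
// failed back unless the recipient claims them via `claim_funds_with_known_custom_tlvs`.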
pub enum FailureCode {
/// We had a temporary error processing the payment. Useful if no other error codes fit
/// and you want to indicate that the payer may want to retry.
- TemporaryNodeFailure = 0x2000 | 2,
+ TemporaryNodeFailure,
/// We have a required feature which was not in this onion. For example, you may require
/// some additional metadata that was not provided with this payment.
- RequiredNodeFeatureMissing = 0x4000 | 0x2000 | 3,
+ RequiredNodeFeatureMissing,
/// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of
/// the HTLC is too close to the current block height for safe handling.
/// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is
/// equivalent to calling [`ChannelManager::fail_htlc_backwards`].
- IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
+ IncorrectOrUnknownPaymentDetails,
+ /// We failed to process the payload after the onion was decrypted. You may wish to
+ /// use this when receiving custom HTLC TLVs with even type numbers that you don't recognize.
+ ///
+ /// If available, the tuple data may include the type number and byte offset in the
+ /// decrypted byte stream where the failure occurred.
+ InvalidOnionPayload(Option<(u64, u16)>),
+}
+
+impl Into<u16> for FailureCode {
+ fn into(self) -> u16 {
+ match self {
+ FailureCode::TemporaryNodeFailure => 0x2000 | 2,
+ FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3,
+ FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15,
+ FailureCode::InvalidOnionPayload(_) => 0x4000 | 22,
+ }
+ }
}
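// Worked example (illustrative only): the conversion above yields the BOLT 4 failure codes the
// variants previously carried as discriminants, e.g.
//
//     let code: u16 = FailureCode::TemporaryNodeFailure.into();             // 0x2002
//     let code: u16 = FailureCode::IncorrectOrUnknownPaymentDetails.into(); // 0x400f
//     let code: u16 = FailureCode::InvalidOnionPayload(None).into();        // 0x4016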
/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
err: msgs::LightningError,
chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
+ channel_capacity: Option<u64>,
}
impl MsgHandleErrInternal {
#[inline]
},
chan_id: None,
shutdown_finish: None,
+ channel_capacity: None,
}
}
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
- Self { err, chan_id: None, shutdown_finish: None }
+ Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
}
#[inline]
- fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+ fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
Self {
err: LightningError {
err: err.clone(),
},
chan_id: Some((channel_id, user_channel_id)),
shutdown_finish: Some((shutdown_res, channel_update)),
+ channel_capacity: Some(channel_capacity)
}
}
#[inline]
},
chan_id: None,
shutdown_finish: None,
+ channel_capacity: None,
}
}
}
&& self.in_flight_monitor_updates.is_empty()
}
- // Returns a count of all channels we have with this peer, including pending channels.
+ // Returns a count of all channels we have with this peer, including unfunded channels.
fn total_channel_count(&self) -> usize {
self.channel_by_id.len() +
self.outbound_v1_channel_by_id.len() +
match $internal {
Ok(msg) => Ok(msg),
- Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
+ Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
let mut msg_events = Vec::with_capacity(2);
if let Some((shutdown_res, update_option)) = shutdown_finish {
if let Some((channel_id, user_channel_id)) = chan_id {
$self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
channel_id, user_channel_id,
- reason: ClosureReason::ProcessingError { err: err.err.clone() }
+ reason: ClosureReason::ProcessingError { err: err.err.clone() },
+ counterparty_node_id: Some($counterparty_node_id),
+ channel_capacity_sats: channel_capacity,
}, None));
}
}
update_maps_on_chan_removal!($self, &$channel.context);
let shutdown_res = $channel.context.force_shutdown(true);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
- shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+ shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis()))
},
}
};
- ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => {
+ ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
match $err {
- // We should only ever have `ChannelError::Close` when prefunded channels error.
+ // We should only ever have `ChannelError::Close` when unfunded channels error.
// In any case, just close the channel.
ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
- log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+ log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
update_maps_on_chan_removal!($self, &$channel_context);
let shutdown_res = $channel_context.force_shutdown(false);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
- shutdown_res, None))
+ shutdown_res, None, $channel_context.get_value_satoshis()))
},
}
}
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED);
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
if drop {
$entry.remove_entry();
}
let res = Err(MsgHandleErrInternal::from_finish_shutdown(
"ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
$chan.context.get_user_id(), $chan.context.force_shutdown(false),
- $self.get_channel_update_for_broadcast(&$chan).ok()));
+ $self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis()));
$remove;
res
},
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ // Only `Channels` in the channel_by_id map can be considered funded.
for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
peer_state.latest_features.clone(), &self.fee_estimator);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let features = &peer_state.latest_features;
+ let chan_context_to_details = |context| {
+ ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
+ };
return peer_state.channel_by_id
.iter()
- .map(|(_, channel)|
- ChannelDetails::from_channel_context(&channel.context, best_block_height,
- features.clone(), &self.fee_estimator))
+ .map(|(_, channel)| &channel.context)
+ .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+ .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+ .map(chan_context_to_details)
.collect();
}
vec![]
pending_events_lock.push_back((events::Event::ChannelClosed {
channel_id: context.channel_id(),
user_channel_id: context.get_user_id(),
- reason: closure_reason
+ reason: closure_reason,
+ counterparty_node_id: Some(context.get_counterparty_node_id()),
+ channel_capacity_sats: Some(context.get_value_satoshis()),
}, None));
}
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
- let per_peer_state = self.per_peer_state.read().unwrap();
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex = per_peer_state.get(counterparty_node_id)
- .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let their_features = &peer_state.latest_features;
- let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
- .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
- failed_htlcs = htlcs;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg: shutdown_msg,
- });
+ match peer_state.channel_by_id.entry(channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+ let their_features = &peer_state.latest_features;
+ let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+ failed_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt.take() {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
- }
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+				// `closing_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg: shutdown_msg,
+ });
- if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, chan_entry);
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt.take() {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
}
- self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
- }
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
+
+ if chan_entry.get().is_shutdown() {
+ let channel = remove_channel!(self, chan_entry);
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
+ }
+ self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+ }
+ break Ok(());
+ },
+ hash_map::Entry::Vacant(_) => (),
+ }
}
+ // If we reach this point, it means that the channel_id either refers to an unfunded channel or
+ // it does not exist for this peer. Either way, we can attempt to force-close it.
+ //
+		// An appropriate error will be returned if the channel does not exist for this peer.
+ return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+ // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
+ // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
};
for htlc_source in failed_htlcs.drain(..) {
self.issue_channel_close_events(&chan.get().context, closure_reason);
let mut chan = remove_channel!(self, chan);
self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Prefunded channel has no update
+ // Unfunded channel has no update
(None, chan.context.get_counterparty_node_id())
} else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
self.issue_channel_close_events(&chan.get().context, closure_reason);
let mut chan = remove_channel!(self, chan);
self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Prefunded channel has no update
+ // Unfunded channel has no update
(None, chan.context.get_counterparty_node_id())
} else {
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
}
fn construct_fwd_pending_htlc_info(
- &self, msg: &msgs::UpdateAddHTLC, hop_data: msgs::OnionHopData, hop_hmac: [u8; 32],
+ &self, msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32],
new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32],
next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
) -> Result<PendingHTLCInfo, InboundOnionErr> {
version: 0,
public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
hop_data: new_packet_bytes,
- hmac: hop_hmac.clone(),
+ hmac: hop_hmac,
};
- let short_channel_id = match hop_data.format {
- msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
- msgs::OnionHopDataFormat::FinalNode { .. } => {
+ let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data {
+ msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
+ (short_channel_id, amt_to_forward, outgoing_cltv_value),
+ msgs::InboundOnionPayload::Receive { .. } =>
return Err(InboundOnionErr {
msg: "Final Node OnionHopData provided for us as an intermediary node",
err_code: 0x4000 | 22,
err_data: Vec::new(),
- })
- },
+ }),
};
Ok(PendingHTLCInfo {
onion_packet: outgoing_packet,
short_channel_id,
},
- payment_hash: msg.payment_hash.clone(),
+ payment_hash: msg.payment_hash,
incoming_shared_secret: shared_secret,
incoming_amt_msat: Some(msg.amount_msat),
- outgoing_amt_msat: hop_data.amt_to_forward,
- outgoing_cltv_value: hop_data.outgoing_cltv_value,
+ outgoing_amt_msat: amt_to_forward,
+ outgoing_cltv_value,
skimmed_fee_msat: None,
})
}
fn construct_recv_pending_htlc_info(
- &self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32], payment_hash: PaymentHash,
+ &self, hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash,
amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
counterparty_skimmed_fee_msat: Option<u64>,
) -> Result<PendingHTLCInfo, InboundOnionErr> {
- let (payment_data, keysend_preimage, payment_metadata) = match hop_data.format {
- msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } =>
- (payment_data, keysend_preimage, payment_metadata),
+ let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data {
+ msgs::InboundOnionPayload::Receive {
+ payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
+ } =>
+ (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
_ =>
return Err(InboundOnionErr {
err_code: 0x4000|22,
}),
};
// final_incorrect_cltv_expiry
- if hop_data.outgoing_cltv_value > cltv_expiry {
+ if outgoing_cltv_value > cltv_expiry {
return Err(InboundOnionErr {
msg: "Upstream node set CLTV to less than the CLTV set by the sender",
err_code: 18,
// payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
// channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
let current_height: u32 = self.best_block.read().unwrap().height();
- if (hop_data.outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+ if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
let mut err_data = Vec::with_capacity(12);
err_data.extend_from_slice(&amt_msat.to_be_bytes());
err_data.extend_from_slice(&current_height.to_be_bytes());
msg: "The final CLTV expiry is too soon to handle",
});
}
- if (!allow_underpay && hop_data.amt_to_forward > amt_msat) ||
- (allow_underpay && hop_data.amt_to_forward >
+ if (!allow_underpay && onion_amt_msat > amt_msat) ||
+ (allow_underpay && onion_amt_msat >
amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
{
return Err(InboundOnionErr {
payment_data,
payment_preimage,
payment_metadata,
- incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+ incoming_cltv_expiry: outgoing_cltv_value,
+ custom_tlvs,
}
} else if let Some(data) = payment_data {
PendingHTLCRouting::Receive {
payment_data: data,
payment_metadata,
- incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+ incoming_cltv_expiry: outgoing_cltv_value,
phantom_shared_secret,
+ custom_tlvs,
}
} else {
return Err(InboundOnionErr {
payment_hash,
incoming_shared_secret: shared_secret,
incoming_amt_msat: Some(amt_msat),
- outgoing_amt_msat: hop_data.amt_to_forward,
- outgoing_cltv_value: hop_data.outgoing_cltv_value,
+ outgoing_amt_msat: onion_amt_msat,
+ outgoing_cltv_value,
skimmed_fee_msat: counterparty_skimmed_fee_msat,
})
}
};
let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value, next_packet_pk_opt) = match next_hop {
onion_utils::Hop::Forward {
- next_hop_data: msgs::OnionHopData {
- format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward,
- outgoing_cltv_value,
+ next_hop_data: msgs::InboundOnionPayload::Forward {
+ short_channel_id, amt_to_forward, outgoing_cltv_value
}, ..
} => {
- let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx,
+ let next_packet_pk = onion_utils::next_hop_pubkey(&self.secp_ctx,
msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
- (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk))
+ (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_packet_pk))
},
// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
// inbound channel's state.
onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
- onion_utils::Hop::Forward {
- next_hop_data: msgs::OnionHopData { format: msgs::OnionHopDataFormat::FinalNode { .. }, .. }, ..
- } => {
+ onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } => {
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
}
};
#[cfg(test)]
pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
let _lck = self.total_consistency_lock.read().unwrap();
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+ self.send_payment_along_path(SendAlongPathArgs {
+ path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
+ session_priv_bytes
+ })
}
- fn send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
+ let SendAlongPathArgs {
+ path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
+ session_priv_bytes
+ } = args;
// The top-level caller should hold the total_consistency_lock read lock.
debug_assert!(self.total_consistency_lock.try_write().is_err());
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
- .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ .send_payment_with_route(route, payment_hash, recipient_onion, payment_id,
+ &self.entropy_source, &self.node_signer, best_block_height,
+ |args| self.send_payment_along_path(args))
}
/// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
- &self.pending_events,
- |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ &self.pending_events, |args| self.send_payment_along_path(args))
}
#[cfg(test)]
pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
- |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
+ keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
+ best_block_height, |args| self.send_payment_along_path(args))
}
#[cfg(test)]
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
- &self.node_signer, best_block_height,
- |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ &self.node_signer, best_block_height, |args| self.send_payment_along_path(args))
}
/// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
- &self.logger, &self.pending_events,
- |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ &self.logger, &self.pending_events, |args| self.send_payment_along_path(args))
}
/// Send a payment that is probing the given route for liquidity. We calculate the
pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
+ &self.entropy_source, &self.node_signer, best_block_height,
+ |args| self.send_payment_along_path(args))
}
/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
Some(chan) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
- let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
let shutdown_res = chan.context.force_shutdown(false);
- (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None))
+ let channel_capacity = chan.context.get_value_satoshis();
+ (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
} else { unreachable!(); });
match funding_res {
Ok((chan, funding_msg)) => (chan, funding_msg),
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for channel_id in channel_ids {
- if !peer_state.channel_by_id.contains_key(channel_id) {
+ if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
});
- }
+ };
}
for channel_id in channel_ids {
- let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
- let mut config = channel.context.config();
- config.apply(config_update);
- if !channel.context.update_config(&config) {
+ if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
+ let mut config = channel.context.config();
+ config.apply(config_update);
+ if !channel.context.update_config(&config) {
+ continue;
+ }
+ if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg,
+ });
+ }
continue;
}
- if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
- } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.context.get_counterparty_node_id(),
- msg,
+
+ let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
+ &mut channel.context
+ } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
+ &mut channel.context
+ } else {
+ // This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
+ debug_assert!(false);
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
+ log_bytes!(*channel_id), counterparty_node_id),
});
- }
+ };
+ let mut config = context.config();
+ config.apply(config_update);
+			// We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`,
+			// which these pending inbound/outbound channels have not yet reached.
+ context.update_config(&config);
}
Ok(())
}
}
}) => {
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
- PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret } => {
+ PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => {
let _legacy_hop_data = Some(payment_data.clone());
- let onion_fields =
- RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), payment_metadata };
+ let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
+ payment_metadata, custom_tlvs };
(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
Some(payment_data), phantom_shared_secret, onion_fields)
},
- PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => {
+ PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
let onion_fields = RecipientOnionFields {
payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
- payment_metadata
+ payment_metadata,
+ custom_tlvs,
};
(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
payment_data, None, onion_fields)
let best_block_height = self.best_block.read().unwrap().height();
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
- &self.pending_events, &self.logger,
- |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+ &self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = self.process_background_events();
- let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
+ let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ min_mempool_feerate
+ } else {
+ normal_feerate
+ };
let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
}
/// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
/// with the current [`ChannelConfig`].
/// * Removing peers which have disconnected and no longer have any channels.
+ /// * Force-closing and removing channels which have not completed establishment in a timely manner.
///
/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
/// estimate fetches.
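// Usage sketch (illustrative, with a hypothetical `channel_manager` handle): callers typically
// drive this from a background timer, roughly once per minute, e.g.
//
//     loop {
//         std::thread::sleep(std::time::Duration::from_secs(60));
//         channel_manager.timer_tick_occurred();
//     }
//
// When using `lightning-background-processor`, this is handled automatically.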
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = self.process_background_events();
- let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
let pending_msg_events = &mut peer_state.pending_msg_events;
let counterparty_node_id = *counterparty_node_id;
peer_state.channel_by_id.retain(|chan_id, chan| {
+ let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ min_mempool_feerate
+ } else {
+ normal_feerate
+ };
let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
true
});
+
+ let process_unfunded_channel_tick = |
+ chan_id: &[u8; 32],
+ chan_context: &mut ChannelContext<<SP::Target as SignerProvider>::Signer>,
+ unfunded_chan_context: &mut UnfundedChannelContext,
+ | {
+ chan_context.maybe_expire_prev_config();
+ if unfunded_chan_context.should_expire_unfunded_channel() {
+				log_error!(self.logger, "Force-closing pending channel {} for not establishing in a timely manner", log_bytes!(&chan_id[..]));
+ update_maps_on_chan_removal!(self, &chan_context);
+ self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
+ self.finish_force_close_channel(chan_context.force_shutdown(false));
+ false
+ } else {
+ true
+ }
+ };
+ peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+ peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+
if peer_state.ok_to_remove(true) {
pending_peers_awaiting_removal.push(counterparty_node_id);
}
/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
match failure_code {
- FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
- FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
+ FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code.into()),
+ FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
- HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
+ HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
+ },
+ FailureCode::InvalidOnionPayload(data) => {
+ let fail_data = match data {
+ Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
+ None => Vec::new(),
+ };
+ HTLCFailReason::reason(failure_code.into(), fail_data)
}
}
}
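// Worked example (assuming LDK's standard big-endian `BigSize`/`u16` serialization): failing an
// HTLC with `FailureCode::InvalidOnionPayload(Some((65536, 3)))` produces failure data of the
// BigSize-encoded TLV type followed by the two-byte offset, i.e.
// `[0xfe, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03]`, while `None` produces empty failure data.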
/// event matches your expectation. If you fail to do so and call this method, you may provide
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
+ /// This function will fail the payment if it has custom TLVs with even type numbers, as we
+ /// will assume they are unknown. If you intend to accept even custom TLVs, you should use
+ /// [`claim_funds_with_known_custom_tlvs`].
+ ///
/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
/// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
/// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
/// [`process_pending_events`]: EventsProvider::process_pending_events
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
+ /// [`claim_funds_with_known_custom_tlvs`]: Self::claim_funds_with_known_custom_tlvs
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
+ self.claim_payment_internal(payment_preimage, false);
+ }
+
+ /// This is a variant of [`claim_funds`] that allows accepting a payment with custom TLVs with
+ /// even type numbers.
+ ///
+ /// # Note
+ ///
+ /// You MUST check you've understood all even TLVs before using this to
+ /// claim, otherwise you may unintentionally agree to some protocol you do not understand.
+ ///
+ /// [`claim_funds`]: Self::claim_funds
+ pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
+ self.claim_payment_internal(payment_preimage, true);
+ }
+
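// Usage sketch (hypothetical values): if a `PaymentClaimable` event reports
// `onion_fields.custom_tlvs == vec![(65536, vec![0x42])]` (an even, required-to-understand type),
// the recipient must inspect the TLV and then call `claim_funds_with_known_custom_tlvs(preimage)`;
// a plain `claim_funds(preimage)` would instead fail the payment back with
// `FailureCode::InvalidOnionPayload`.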
+ fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
log_bytes!(payment_hash.0));
}
+
+ if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = payment.onion_fields {
+ if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
+ log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
+ log_bytes!(payment_hash.0), log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
+ claimable_payments.pending_claiming_payments.remove(&payment_hash);
+ mem::drop(claimable_payments);
+ for htlc in payment.htlcs {
+ let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
+ let source = HTLCSource::PreviousHopData(htlc.prev_hop);
+ let receiver = HTLCDestination::FailedPayment { payment_hash };
+ self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
+ }
+ return;
+ }
+ }
+
payment.htlcs
} else { return; }
};
let user_id = inbound_chan.context.get_user_id();
let shutdown_res = inbound_chan.context.force_shutdown(false);
return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
- msg.temporary_channel_id, user_id, shutdown_res, None));
+ msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis()));
},
}
},
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
-
- if !chan_entry.get().received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
- log_bytes!(msg.channel_id),
- if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
- }
+ // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
+ // https://github.com/lightningdevkit/rust-lightning/issues/2422
+ if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+ self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel!(self, chan_entry);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ return Ok(());
+ } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+ self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel!(self, chan_entry);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ return Ok(());
+ } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+ if !chan_entry.get().received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
- chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
- dropped_htlcs = htlcs;
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+ chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+ dropped_htlcs = htlcs;
- if let Some(msg) = shutdown {
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
+ if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+				// `closing_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
- }
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+ }
+ break Ok(());
+ } else {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
for htlc_source in dropped_htlcs.drain(..) {
provided_node_features(&self.default_configuration)
}
- /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+ /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
///
/// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
/// or not. Thus, this method is not public.
#[cfg(any(feature = "_test_utils", test))]
- pub fn invoice_features(&self) -> InvoiceFeatures {
+ pub fn invoice_features(&self) -> Bolt11InvoiceFeatures {
provided_invoice_features(&self.default_configuration)
}
log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
let per_peer_state = self.per_peer_state.read().unwrap();
- for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|_, chan| {
- let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
- if !chan.context.have_received_message() {
- // If we created this (outbound) channel while we were disconnected from the
- // peer we probably failed to send the open_channel message, which is now
- // lost. We can't have had anything pending related to this channel, so we just
- // drop it.
- false
- } else {
- pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&self.logger),
- });
- true
- }
- } else { true };
- if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
- if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
- if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
- pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
- node_id: *counterparty_node_id,
- msg, update_msg,
- });
- }
- }
- }
- retain
+
+			// Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+			// (and thus won't be recovered after a crash), we don't need to bother closing unfunded channels
+			// and clearing their maps here. Instead we can just queue channel_reestablish messages for
+			// channels in the channel_by_id map.
+ peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_channel_reestablish(&self.logger),
+ });
});
}
//TODO: Also re-broadcast announcement_signatures
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: *counterparty_node_id,
msg,
/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
- provided_init_features(config).to_context()
+ let mut node_features = provided_init_features(config).to_context();
+ node_features.set_keysend_optional();
+ node_features
}
-/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
///
/// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
/// or not. Thus, this method is not public.
#[cfg(any(feature = "_test_utils", test))]
-pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures {
+pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
provided_init_features(config).to_context()
}
(1, phantom_shared_secret, option),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
+ (5, custom_tlvs, optional_vec),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(4, payment_data, option), // Added in 0.0.116
+ (5, custom_tlvs, optional_vec),
},
;);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
- reason: ClosureReason::OutdatedChannelManager
+ reason: ClosureReason::OutdatedChannelManager,
+ counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
+ channel_capacity_sats: Some(channel.context.get_value_satoshis()),
}, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
+ counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
+ channel_capacity_sats: Some(channel.context.get_value_satoshis()),
}, None));
} else {
log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
payment_secret: None, // only used for retries, and we'll never retry on startup
payment_metadata: None, // only used for retries, and we'll never retry on startup
keysend_preimage: None, // only used for retries, and we'll never retry on startup
+ custom_tlvs: Vec::new(), // only used for retries, and we'll never retry on startup
pending_amt_msat: path_amt,
pending_fee_msat: Some(path_fee),
total_msat: path_amt,
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
{
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
let node = create_network(1, &node_cfg, &node_chanmgr);
let sender_intended_amt_msat = 100;
let extra_fee_msat = 10;
- let hop_data = msgs::OnionHopData {
- amt_to_forward: 100,
+ let hop_data = msgs::InboundOnionPayload::Receive {
+ amt_msat: 100,
outgoing_cltv_value: 42,
- format: msgs::OnionHopDataFormat::FinalNode {
- keysend_preimage: None,
- payment_metadata: None,
- payment_data: Some(msgs::FinalOnionHopData {
- payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
- }),
- }
+ payment_metadata: None,
+ keysend_preimage: None,
+ payment_data: Some(msgs::FinalOnionHopData {
+ payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
+ }),
+ custom_tlvs: Vec::new(),
};
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
} else { panic!(); }
// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
- let hop_data = msgs::OnionHopData { // This is the same hop_data as above, OnionHopData doesn't implement Clone
- amt_to_forward: 100,
+ let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
+ amt_msat: 100,
outgoing_cltv_value: 42,
- format: msgs::OnionHopDataFormat::FinalNode {
- keysend_preimage: None,
- payment_metadata: None,
- payment_data: Some(msgs::FinalOnionHopData {
- payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
- }),
- }
+ payment_metadata: None,
+ keysend_preimage: None,
+ payment_data: Some(msgs::FinalOnionHopData {
+ payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
+ }),
+ custom_tlvs: Vec::new(),
};
assert!(node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok());
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
+
+ // If we provide a channel_id not associated with the peer, we should get an error and no updates
+ // should be applied to ensure update atomicity as specified in the API docs.
+ let bad_channel_id = [10; 32];
+ let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
+ let new_fee = current_fee + 100;
+ assert!(
+ matches!(
+ nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
+ forwarding_fee_proportional_millionths: Some(new_fee),
+ ..Default::default()
+ }),
+ Err(APIError::ChannelUnavailable { err: _ }),
+ )
+ );
+ // Check that the fee hasn't changed for the channel that exists.
+ assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 0);
}
}