use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::wire::Encode;
use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;
pub(super) routing: PendingHTLCRouting,
pub(super) incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
-/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
-/// issues such as overly long function definitions. Note that the ChannelManager can take any type
-/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
-/// type alias chooses the concrete types of KeysManager and DefaultRouter.
+/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
+/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
+/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
+/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
+/// of [`KeysManager`] and [`DefaultRouter`].
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
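// A minimal sketch of how an application might pin this alias to its own component types;
// `MyChainMonitor`, `MyBroadcaster`, `MyFeeEstimator` and `MyLogger` are illustrative
// placeholders for the application's chain::Watch, BroadcasterInterface, FeeEstimator and
// Logger implementations, not types provided by this crate:
//
// type NodeManager<'a> = SimpleRefChannelManager<
//     'a, 'a, 'a, 'a, 'a, 'a, 'a, 'a, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;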
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
-/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
-/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
-/// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates
-/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
+/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
+/// [`ChannelMonitor`]. With [`ChannelMonitor`]s you MUST write each monitor update out to disk before
+/// returning from [`chain::Watch::watch_channel`]/[`update_channel`]. With `ChannelManager`s, writing updates
+/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
-/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
-/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
-/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
-/// block_connected() to step towards your best block) upon deserialization before using the
-/// object!
+/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
+/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
+/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
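+///
+/// A minimal sketch of that read path (assuming `serialized_manager` holds the serialized
+/// bytes and `read_args` was built from your components via [`ChannelManagerReadArgs`]; the
+/// concrete type parameters are elided):
+///
+/// ```ignore
+/// let mut reader = &serialized_manager[..];
+/// let (last_block_hash, manager) =
+///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, read_args)?;
+/// // Re-sync `manager` from `last_block_hash` to the current chain tip before using it.
+/// ```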
-/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
-/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
-/// essentially you should default to using a SimpleRefChannelManager, and use a
-/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
+/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
+/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
+/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
+/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
+///
+/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
+/// [`funding_created`]: msgs::FundingCreated
+/// [`funding_transaction_generated`]: Self::funding_transaction_generated
+/// [`BlockHash`]: bitcoin::hash_types::BlockHash
+/// [`update_channel`]: chain::Watch::update_channel
+/// [`ChannelUpdate`]: msgs::ChannelUpdate
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`read`]: ReadableArgs::read
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
inbound_scid_alias: channel.latest_inbound_scid_alias(),
channel_value_satoshis: channel.get_value_satoshis(),
unspendable_punishment_reserve: to_self_reserve_satoshis,
balance_msat: balance.balance_msat,
inbound_capacity_msat: balance.inbound_capacity_msat,
- ($self: ident, $internal: expr, $counterparty_node_id: expr) => {
+ ($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
+ // In testing, ensure there are no deadlocks where the lock is already held upon
+ // entering the macro.
+ debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+
+macro_rules! emit_channel_pending_event {
+ ($locked_events: expr, $channel: expr) => {
+ if $channel.should_emit_channel_pending_event() {
+ $locked_events.push(events::Event::ChannelPending {
+ channel_id: $channel.channel_id(),
+ former_temporary_channel_id: $channel.temporary_channel_id(),
+ counterparty_node_id: $channel.get_counterparty_node_id(),
+ user_channel_id: $channel.get_user_id(),
+ funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ });
+ $channel.set_channel_pending_event_emitted();
+ }
+ }
+}
+
- {
- let mut pending_events = $self.pending_events.lock().unwrap();
- pending_events.push(events::Event::ChannelReady {
- channel_id: $channel.channel_id(),
- user_channel_id: $channel.get_user_id(),
- counterparty_node_id: $channel.get_counterparty_node_id(),
- channel_type: $channel.get_channel_type().clone(),
- });
- }
+ debug_assert!($channel.channel_pending_event_emitted());
+ $locked_events.push(events::Event::ChannelReady {
+ channel_id: $channel.channel_id(),
+ user_channel_id: $channel.get_user_id(),
+ counterparty_node_id: $channel.get_counterparty_node_id(),
+ channel_type: $channel.get_channel_type().clone(),
+ });
- /// Users need to notify the new ChannelManager when a new block is connected or
- /// disconnected using its `block_connected` and `block_disconnected` methods, starting
- /// from after `params.latest_hash`.
+ /// Users need to notify the new `ChannelManager` when a new block is connected or
+ /// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting
+ /// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
+ /// more details.
+ ///
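+ /// A minimal sketch of that initial sync (assuming `blocks_since_best_block` is an ordered
+ /// list of `(Block, height)` pairs obtained from your chain source; the names here are
+ /// illustrative, not part of this crate's API):
+ ///
+ /// ```ignore
+ /// use lightning::chain::Listen;
+ /// for (block, height) in blocks_since_best_block {
+ ///     channel_manager.block_connected(&block, height);
+ /// }
+ /// ```
+ ///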
+ /// [`block_connected`]: chain::Listen::block_connected
+ /// [`block_disconnected`]: chain::Listen::block_disconnected
+ /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, None)
}
pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
}
fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
if !chan.should_announce() {
return Err(LightningError {
fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
- pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
- fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
}, onion_packet, &self.logger);
match break_chan_entry!(self, send_res, chan) {
Some(monitor_update) => {
- /// # A caution on `payment_secret`
- ///
- /// `payment_secret` is unrelated to `payment_hash` (or [`PaymentPreimage`]) and exists to
- /// authenticate the sender to the recipient and prevent payment-probing (deanonymization)
- /// attacks. For newer nodes, it will be provided to you in the invoice. If you do not have one,
- /// the [`Route`] must not contain multiple paths as multi-path payments require a
- /// recipient-provided `payment_secret`.
- ///
- /// If a `payment_secret` *is* provided, we assume that the invoice had the payment_secret
- /// feature bit set (either as required or as available). If multiple paths are present in the
- /// [`Route`], we assume the invoice had the basic_mpp feature set.
- ///
- .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
- pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
+ pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
&self.pending_events,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
- fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
+ pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
- self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn abandon_payment(&self, payment_id: PaymentId) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
- best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
+ &self.node_signer, best_block_height,
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
- self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
- retry_strategy, route_params, &self.router, self.list_usable_channels(),
+ self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
+ payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.logger, &self.pending_events,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
- let (chan, msg) = {
- let (res, chan) = {
- match peer_state.channel_by_id.remove(temporary_channel_id) {
- Some(mut chan) => {
- let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
- (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
- .map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
- } else { unreachable!(); })
- , chan)
+ let (msg, chan) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+ Some(mut chan) => {
+ let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+ let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ .map_err(|e| if let ChannelError::Close(msg) = e {
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+ } else { unreachable!(); });
+ match funding_res {
+ Ok(funding_msg) => (funding_msg, chan),
+ Err(_) => {
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+
+ let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
+ return Err(APIError::ChannelUnavailable {
+ err: "Signer refused to sign the initial commitment transaction".to_owned()
+ });
- };
- match handle_error!(self, res, chan.get_counterparty_node_id()) {
- Ok(funding_msg) => {
- (chan, funding_msg)
- },
- Err(_) => { return Err(APIError::ChannelUnavailable {
- err: "Signer refused to sign the initial commitment transaction".to_owned()
- }) },
- }
+ },
+ None => {
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with id {} not found for the passed counterparty node_id {}",
+ log_bytes!(*temporary_channel_id), counterparty_node_id),
+ })
+ },
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
- if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
- log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
- log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
+ // The condition determining whether an MPP is complete must
+ // match exactly the condition used in `timer_tick_occurred`
+ if total_value >= msgs::MAX_VALUE_MSAT {
+ fail_htlc!(claimable_htlc, payment_hash);
+ } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
+ log_bytes!(payment_hash.0));
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
let prev_channel_id = prev_funding_outpoint.to_channel_id();
new_events.push(events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+ |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv));
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
if !chan.is_outbound() { return NotifyOption::SkipPersist; }
// If the feerate has decreased by less than half, don't bother
- /// Note that calling this method does *not* guarantee that the payment has been claimed. You
- /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
- /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+ /// This method is guaranteed to claim the payment, but only if the current block height is
+ /// strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race
+ /// conditions, you should wait for an [`Event::PaymentClaimed`] before considering the payment
+ /// successful. It will generally be available in the next [`process_pending_events`] call.
///
/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
/// event matches your expectation. If you fail to do so and call this method, you may provide
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
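/// A minimal sketch of the claim flow from an event handler (assuming a `channel_manager` in
/// scope and an `event` delivered by your handler; `Event` and `PaymentPurpose` come from the
/// `events` module, and the amount check is abbreviated):
///
/// ```ignore
/// match event {
///     Event::PaymentClaimable { payment_hash, amount_msat, purpose, .. } => {
///         // Verify `amount_msat` against your records for `payment_hash` before claiming.
///         if let PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), .. } = purpose {
///             channel_manager.claim_funds(preimage);
///         }
///     },
///     Event::PaymentClaimed { .. } => {
///         // Only now should the payment be treated as received.
///     },
///     _ => {},
/// }
/// ```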
- // If we are claiming an MPP payment, we check that all channels which contain a claimable
- // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
- // we're claiming (or even after we claim, before the commitment update dance completes),
- // it should be a relatively rare race, and we'd rather not claim HTLCs that require us to
- // go on-chain (and lose the on-chain fee to do so) than just reject the payment.
- //
- // Note that we'll still always get our funds - as long as the generated
- // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
- //
- // If we find an HTLC which we would need to claim but for which we do not have a
- // channel, we will fail all parts of the MPP payment. While we could wait and see if
- // the sender retries the already-failed path(s), it should be a pretty rare case where
- // we got all the HTLCs and then a channel closed while we were waiting for the user to
- // provide the preimage, so worrying too much about the optimal handling isn't worth
- // it.
+ // Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
+ // and when we got here we need to check that the amount we're about to claim matches the
+ // amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
+ // the MPP parts all have the same `total_msat`.
let mut expected_amt_msat = None;
let mut valid_mpp = true;
let mut errs = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for htlc in sources.iter() {
- let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
- Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
- None => {
- valid_mpp = false;
- break;
- }
- };
-
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- valid_mpp = false;
- break;
- }
-
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
-
- if peer_state.channel_by_id.get(&chan_id).is_none() {
+ if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
+ log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
+ debug_assert!(false);
- let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
- let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
- Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
- None => None
- };
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let chan_id = prev_hop.outpoint.to_channel_id();
+ let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
+ Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
+ None => None
+ };
- if peer_state_opt.is_some() {
- let mut peer_state_lock = peer_state_opt.unwrap();
- let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
- if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
- if let Some(action) = completion_action(Some(htlc_value_msat)) {
- log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
- log_bytes!(chan_id), action);
- peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
- }
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
- let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, per_peer_state, chan);
- if let Err(e) = res {
- // TODO: This is a *critical* error - we probably updated the outbound edge
- // of the HTLC's monitor with a preimage. We should retry this monitor
- // update over and over again until morale improves.
- log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
- return Err((counterparty_node_id, e));
+ if peer_state_opt.is_some() {
+ let mut peer_state_lock = peer_state_opt.unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+ if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+ if let Some(action) = completion_action(Some(htlc_value_msat)) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ log_bytes!(chan_id), action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ }
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+ let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan);
+ if let Err(e) = res {
+ // TODO: This is a *critical* error - we probably updated the outbound edge
+ // of the HTLC's monitor with a preimage. We should retry this monitor
+ // update over and over again until morale improves.
+ log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+ return Err((counterparty_node_id, e));
+ }
- /// Note that this method is not available with the `no-std` feature.
- ///
- /// [`await_persistable_update`]: Self::await_persistable_update
- /// [`await_persistable_update_timeout`]: Self::await_persistable_update_timeout
- /// [`get_persistable_update_future`]: Self::get_persistable_update_future
- #[cfg(any(test, feature = "std"))]
- pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
- self.persistence_notifier.wait_timeout(max_wait)
- }
-
- /// Blocks until ChannelManager needs to be persisted. Only one listener on
- /// [`await_persistable_update`], `await_persistable_update_timeout`, or a future returned by
- /// [`get_persistable_update_future`] is guaranteed to be woken up.
+ /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
+ /// [`ChannelManager`] and should instead register actions to be taken later.
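+ ///
+ /// A minimal sketch of the intended pattern (assuming an async runtime and a user-provided
+ /// `persist_manager` routine; persistence happens after the future resolves rather than
+ /// inside a callback registered on it):
+ ///
+ /// ```ignore
+ /// channel_manager.get_persistable_update_future().await;
+ /// persist_manager(&channel_manager)?;
+ /// ```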
(33, self.inbound_htlc_minimum_msat, option),
(35, self.inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
(33, inbound_htlc_minimum_msat, option),
(35, inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
(0, self.prev_hop, required),
(1, self.total_msat, required),
(2, self.value, required),
(6, self.cltv_expiry, required),
(8, keysend_preimage, option),
});
let mut payment_params: Option<PaymentParameters> = None;
read_tlv_fields!(reader, {
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
0u8.write(writer)?;
let payment_id_opt = Some(payment_id);
write_tlv_fields!(writer, {
(0, session_priv, required),
(1, payment_id_opt, option),
(2, first_hop_htlc_msat, required),
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
- log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
- monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ };
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
- 0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+ 0 => {
+ let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+ if pending_background_events.iter().find(|e| {
+ let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+ *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+ }).is_none() {
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+ }
+ }
for (_, monitor) in args.channel_monitors.iter() {
if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
keysend_preimage: None, // only used for retries, and we'll never retry on startup
pending_amt_msat: path_amt,
pending_fee_msat: Some(path_fee),
use crate::ln::functional_test_utils::*;
use crate::ln::msgs;
use crate::ln::msgs::ChannelMessageHandler;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
// about the channel.
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
// persisted and that its channel info remains the same.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
// the channel info has updated.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
- let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
// Next, send a keysend payment with the same payment_hash and make sure it fails.
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
None, nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
None, nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
- let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap();
- nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
+ RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
+ nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
+ RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
- let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap();
- nodes[0].node.test_send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap();
+ let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash,
+ RecipientOnionFields::secret_only(test_secret), PaymentId(payment_hash.0), &route).unwrap();
+ nodes[0].node.test_send_payment_internal(&route, payment_hash,
+ RecipientOnionFields::secret_only(test_secret), Some(test_preimage),
+ PaymentId(payment_hash.0), None, session_privs).unwrap();
match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
Ok(_) => panic!("Unexpected ok"),
Err(()) => {
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
- use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
- use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
+ use crate::chain::keysinterface::{KeysManager, InMemorySigner};
+ use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
- let scorer = test_utils::TestScorer::new();
- let seed = [3u8; 32];
- let keys_manager = KeysManager::new(&seed, 42, 42);
- let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let route = get_route(&$node_a.get_our_node_id(), &payment_params, &dummy_graph.read_only(),
- Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer, &random_seed_bytes).unwrap();
-
let mut payment_preimage = PaymentPreimage([0; 32]);
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
- $node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ $node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_hash.0), RouteParameters {
+ payment_params, final_value_msat: 10_000,
+ }, Retry::Attempts(0)).unwrap();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);