use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::InvoiceFeatures;
-use crate::routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
+use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
use crate::ln::wire::Encode;
use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient};
use crate::util::config::{UserConfig, ChannelConfig};
-use crate::util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::{byte_utils, events};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
use core::{cmp, mem};
use core::cell::RefCell;
use crate::io::Read;
-use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;
use core::ops::Deref;
pub(super) routing: PendingHTLCRouting,
pub(super) incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
- pub(super) amt_to_forward: u64,
+ pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
+ pub(super) outgoing_amt_msat: u64,
pub(super) outgoing_cltv_value: u32,
}
Fail(HTLCFailureMsg),
}
-pub(super) enum HTLCForwardInfo {
- AddHTLC {
- forward_info: PendingHTLCInfo,
+pub(super) struct PendingAddHTLCInfo {
+ pub(super) forward_info: PendingHTLCInfo,
- // These fields are produced in `forward_htlcs()` and consumed in
- // `process_pending_htlc_forwards()` for constructing the
- // `HTLCSource::PreviousHopData` for failed and forwarded
- // HTLCs.
- //
- // Note that this may be an outbound SCID alias for the associated channel.
- prev_short_channel_id: u64,
- prev_htlc_id: u64,
- prev_funding_outpoint: OutPoint,
- },
+ // These fields are produced in `forward_htlcs()` and consumed in
+ // `process_pending_htlc_forwards()` for constructing the
+ // `HTLCSource::PreviousHopData` for failed and forwarded
+ // HTLCs.
+ //
+ // Note that this may be an outbound SCID alias for the associated channel.
+ prev_short_channel_id: u64,
+ prev_htlc_id: u64,
+ prev_funding_outpoint: OutPoint,
+ prev_user_channel_id: u128,
+}
+
+pub(super) enum HTLCForwardInfo {
+ AddHTLC(PendingAddHTLCInfo),
FailHTLC {
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
struct MsgHandleErrInternal {
err: msgs::LightningError,
- chan_id: Option<([u8; 32], u64)>, // If Some a channel of ours has been closed
+ chan_id: Option<([u8; 32], u128)>, // If Some, a channel of ours has been closed
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
Self { err, chan_id: None, shutdown_finish: None }
}
#[inline]
- fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+ fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
Self {
err: LightningError {
err: err.clone(),
// Note this is only exposed in cfg(test):
pub(super) struct ChannelHolder<Signer: Sign> {
pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
- /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
- ///
- /// Outbound SCID aliases are added here once the channel is available for normal use, with
- /// SCIDs being added once the funding transaction is confirmed at the channel's required
- /// confirmation depth.
- pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
- /// Map from payment hash to the payment data and any HTLCs which are to us and can be
- /// failed/claimed by the user.
- ///
- /// Note that while this is held in the same mutex as the channels themselves, no consistency
- /// guarantees are made about the channels given here actually existing anymore by the time you
- /// go to read them!
- claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
Fulfilled {
session_privs: HashSet<[u8; 32]>,
payment_hash: Option<PaymentHash>,
+ timer_ticks_without_htlcs: u8,
},
/// When a payer gives up trying to retry a payment, they inform us, letting us generate a
/// `PaymentFailed` event when all HTLCs have irrevocably failed. This avoids a number of race
}
impl PendingOutboundPayment {
- fn is_retryable(&self) -> bool {
- match self {
- PendingOutboundPayment::Retryable { .. } => true,
- _ => false,
- }
- }
fn is_fulfilled(&self) -> bool {
match self {
PendingOutboundPayment::Fulfilled { .. } => true,
=> session_privs,
});
let payment_hash = self.payment_hash();
- *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash };
+ *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash, timer_ticks_without_htlcs: 0 };
}
fn mark_abandoned(&mut self) -> Result<(), ()> {
// |
// |__`forward_htlcs`
// |
-// |__`channel_state`
+// |__`pending_inbound_payments`
// | |
-// | |__`id_to_peer`
+// | |__`claimable_htlcs`
// | |
-// | |__`per_peer_state`
+// | |__`pending_outbound_payments`
// | |
-// | |__`outbound_scid_aliases`
-// | |
-// | |__`pending_inbound_payments`
+// | |__`channel_state`
+// | |
+// | |__`id_to_peer`
+// | |
+// | |__`short_to_chan_info`
// | |
-// | |__`pending_outbound_payments`
+// | |__`per_peer_state`
+// | |
+// | |__`outbound_scid_aliases`
// | |
// | |__`best_block`
// | |
#[cfg(not(test))]
forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+ /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+ /// failed/claimed by the user.
+ ///
+ /// Note that no consistency guarantees are made about the channels given here actually
+ /// existing anymore by the time you go to read them!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+
/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
/// and some closed channels which reached a usable state prior to being closed. This is used
/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
/// See `ChannelManager` struct-level documentation for lock order requirements.
id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+ /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
+ ///
+ /// Outbound SCID aliases are added here once the channel is available for normal use, with
+ /// SCIDs being added once the funding transaction is confirmed at the channel's required
+ /// confirmation depth.
+ ///
+ /// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency
+ /// guarantees are made about the existence of a peer with the `counterparty_node_id` or a
+ /// channel with the `channel_id` in our other maps.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ #[cfg(test)]
+ pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+ #[cfg(not(test))]
+ short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+
our_network_key: SecretKey,
our_network_pubkey: PublicKey,
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
-/// The number of blocks before we consider an outbound payment for expiry if it doesn't have any
-/// pending HTLCs in flight.
-pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
-
/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] after which we time out the
+/// idempotency guarantee for a resolved payment's [`PaymentId`]. See
+/// [`ChannelManager::remove_stale_resolved_payments`].
+pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+
/// Information needed for constructing an invoice route hint for this channel.
#[derive(Clone, Debug, PartialEq)]
pub struct CounterpartyForwardingInfo {
///
/// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
pub unspendable_punishment_reserve: Option<u64>,
- /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound.
- pub user_channel_id: u64,
+ /// The `user_channel_id` passed in to create_channel, or a random value if the channel was
+ /// inbound. This may be zero for inbound channels serialized with LDK versions prior to
+ /// 0.0.113.
+ pub user_channel_id: u128,
/// Our total balance. This is the amount we would get if we close the channel.
/// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
/// amount is not likely to be recoverable on close.
/// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
/// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
pub confirmations_required: Option<u32>,
+ /// The current number of confirmations on the funding transaction.
+ ///
+ /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113.
+ pub confirmations: Option<u32>,
/// The number of blocks (after our commitment transaction confirms) that we will need to wait
/// until we can claim our funds after we force-close the channel. During this time our
/// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
#[derive(Clone, Debug)]
pub enum PaymentSendFailure {
/// A parameter which was passed to send_payment was invalid, preventing us from attempting to
- /// send the payment at all. No channel state has been changed or messages sent to peers, and
- /// once you've changed the parameter at error, you can freely retry the payment in full.
+ /// send the payment at all.
+ ///
+ /// You can freely resend the payment in full (with the parameter error fixed).
+ ///
+ /// Because the payment failed outright, no payment tracking is done; you do not need to call
+ /// [`ChannelManager::abandon_payment`], and [`ChannelManager::retry_payment`] will *not* work
+ /// for this payment.
ParameterError(APIError),
/// A parameter in a single path which was passed to send_payment was invalid, preventing us
- /// from attempting to send the payment at all. No channel state has been changed or messages
- /// sent to peers, and once you've changed the parameter at error, you can freely retry the
- /// payment in full.
+ /// from attempting to send the payment at all.
+ ///
+ /// You can freely resend the payment in full (with the parameter error fixed).
///
/// The results here are ordered the same as the paths in the route object which was passed to
/// send_payment.
+ ///
+ /// Because the payment failed outright, no payment tracking is done; you do not need to call
+ /// [`ChannelManager::abandon_payment`], and [`ChannelManager::retry_payment`] will *not* work
+ /// for this payment.
PathParameterError(Vec<Result<(), APIError>>),
/// All paths which were attempted failed to send, with no channel state change taking place.
- /// You can freely retry the payment in full (though you probably want to do so over different
+ /// You can freely resend the payment in full (though you probably want to do so over different
/// paths than the ones selected).
- AllFailedRetrySafe(Vec<APIError>),
+ ///
+ /// Because the payment failed outright, no payment tracking is done; you do not need to call
+ /// [`ChannelManager::abandon_payment`], and [`ChannelManager::retry_payment`] will *not* work
+ /// for this payment.
+ AllFailedResendSafe(Vec<APIError>),
+ /// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not
+ /// yet completed (i.e. generated an [`Event::PaymentSent`]) or been abandoned (via
+ /// [`ChannelManager::abandon_payment`]).
+ ///
+ /// [`Event::PaymentSent`]: events::Event::PaymentSent
+ DuplicatePayment,
/// Some paths which were attempted failed to send, though possibly not all. At least some
/// paths have irrevocably committed to the HTLC and retrying the payment in full would result
/// in over-/re-payment.
}
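+
+// A hedged sketch of how a sender might branch on the variants above (illustrative only;
+// `res` is assumed to be the `Result` returned by `ChannelManager::send_payment`):
+//
+//   match res {
+//       Ok(()) => { /* wait for payment events */ },
+//       Err(PaymentSendFailure::DuplicatePayment) => { /* this PaymentId is already pending */ },
+//       Err(PaymentSendFailure::ParameterError(_)) |
+//       Err(PaymentSendFailure::PathParameterError(_)) |
+//       Err(PaymentSendFailure::AllFailedResendSafe(_)) => { /* fix inputs and resend in full */ },
+//       Err(PaymentSendFailure::PartialFailure { .. }) => { /* some HTLCs committed; retry failed paths only */ },
+//   }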
macro_rules! update_maps_on_chan_removal {
- ($self: expr, $short_to_chan_info: expr, $channel: expr) => {
+ ($self: expr, $channel: expr) => {{
+ $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+ let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
if let Some(short_id) = $channel.get_short_channel_id() {
- $short_to_chan_info.remove(&short_id);
+ short_to_chan_info.remove(&short_id);
} else {
// If the channel was never confirmed on-chain prior to its closure, remove the
// outbound SCID alias we used for it from the collision-prevention set. While we
let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
debug_assert!(alias_removed);
}
- $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
- $short_to_chan_info.remove(&$channel.outbound_scid_alias());
- }
+ short_to_chan_info.remove(&$channel.outbound_scid_alias());
+ }}
}
/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
macro_rules! convert_chan_err {
- ($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => {
+ ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
match $err {
ChannelError::Warn(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
},
ChannelError::Close(msg) => {
log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
- update_maps_on_chan_removal!($self, $short_to_chan_info, $channel);
+ update_maps_on_chan_removal!($self, $channel);
let shutdown_res = $channel.force_shutdown(true);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
}
macro_rules! break_chan_entry {
- ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
+ ($self: ident, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
if drop {
$entry.remove_entry();
}
}
macro_rules! try_chan_entry {
- ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
+ ($self: ident, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
if drop {
$entry.remove_entry();
}
}
macro_rules! remove_channel {
- ($self: expr, $channel_state: expr, $entry: expr) => {
+ ($self: expr, $entry: expr) => {
{
let channel = $entry.remove_entry().1;
- update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel);
+ update_maps_on_chan_removal!($self, channel);
channel
}
}
}
macro_rules! handle_monitor_update_res {
- ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+ ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
match $err {
ChannelMonitorUpdateStatus::PermanentFailure => {
log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
- update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
+ update_maps_on_chan_removal!($self, $chan);
// TODO: $failed_fails is dropped here, which will cause other channels to hit the
// chain in a confused state! We need to move them into the ChannelMonitor which
// will be responsible for failing backwards once things confirm on-chain.
},
}
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
- let (res, drop) = handle_monitor_update_res!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+ let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
if drop {
$entry.remove_entry();
}
res
} };
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
- handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+ handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
} };
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
- handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
- handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
- handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
};
}
macro_rules! send_channel_ready {
- ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
+ ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
node_id: $channel.get_counterparty_node_id(),
msg: $channel_ready_msg,
});
// Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
- let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
+ let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
+ let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
if let Some(real_scid) = $channel.get_short_channel_id() {
- let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
+ let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
}
- }
+ }}
}
macro_rules! emit_channel_ready_event {
}
}
-macro_rules! handle_chan_restoration_locked {
- ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
- $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
- $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
- let mut htlc_forwards = None;
-
- let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
- let chanmon_update_is_none = chanmon_update.is_none();
- let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
- let res = loop {
- let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
- if !forwards.is_empty() {
- htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
- $channel_entry.get().get_funding_txo().unwrap(), forwards));
- }
-
- if chanmon_update.is_some() {
- // On reconnect, we, by definition, only resend a channel_ready if there have been
- // no commitment updates, so the only channel monitor update which could also be
- // associated with a channel_ready would be the funding_created/funding_signed
- // monitor update. That monitor update failing implies that we won't send
- // channel_ready until it's been updated, so we can't have a channel_ready and a
- // monitor update here (so we don't bother to handle it correctly below).
- assert!($channel_ready.is_none());
- // A channel monitor update makes no sense without either a channel_ready or a
- // commitment update to process after it. Since we can't have a channel_ready, we
- // only bother to handle the monitor-update + commitment_update case below.
- assert!($commitment_update.is_some());
- }
-
- if let Some(msg) = $channel_ready {
- // Similar to the above, this implies that we're letting the channel_ready fly
- // before it should be allowed to.
- assert!(chanmon_update.is_none());
- send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg);
- }
- if let Some(msg) = $announcement_sigs {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: counterparty_node_id,
- msg,
- });
- }
-
- emit_channel_ready_event!($self, $channel_entry.get_mut());
-
- let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
- if let Some(monitor_update) = chanmon_update {
- // We only ever broadcast a funding transaction in response to a funding_signed
- // message and the resulting monitor update. Thus, on channel_reestablish
- // message handling we can't have a funding transaction to broadcast. When
- // processing a monitor update finishing resulting in a funding broadcast, we
- // cannot have a second monitor update, thus this case would indicate a bug.
- assert!(funding_broadcastable.is_none());
- // Given we were just reconnected or finished updating a channel monitor, the
- // only case where we can get a new ChannelMonitorUpdate would be if we also
- // have some commitment updates to send as well.
- assert!($commitment_update.is_some());
- match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- let mut order = $order;
- if $raa.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
- }
- break handle_monitor_update_res!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
- }
- }
- }
-
- macro_rules! handle_cs { () => {
- if let Some(update) = $commitment_update {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id,
- updates: update,
- });
- }
- } }
- macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = $raa {
- $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: counterparty_node_id,
- msg: revoke_and_ack,
- });
- }
- } }
- match $order {
- RAACommitmentOrder::CommitmentFirst => {
- handle_cs!();
- handle_raa!();
- },
- RAACommitmentOrder::RevokeAndACKFirst => {
- handle_raa!();
- handle_cs!();
- },
- }
- if let Some(tx) = funding_broadcastable {
- log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- $self.tx_broadcaster.broadcast_transaction(&tx);
- }
- break Ok(());
- };
-
- if chanmon_update_is_none {
- // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
- // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
- // should *never* end up calling back to `chain_monitor.update_channel()`.
- assert!(res.is_ok());
- }
-
- (htlc_forwards, res, counterparty_node_id)
- } }
-}
-
-macro_rules! post_handle_chan_restoration {
- ($self: ident, $locked_res: expr) => { {
- let (htlc_forwards, res, counterparty_node_id) = $locked_res;
-
- let _ = handle_error!($self, res, counterparty_node_id);
-
- if let Some(forwards) = htlc_forwards {
- $self.forward_htlcs(&mut [forwards][..]);
- }
- } }
-}
-
impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
- short_to_chan_info: HashMap::new(),
- claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
}),
outbound_scid_aliases: Mutex::new(HashSet::new()),
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: Mutex::new(HashMap::new()),
forward_htlcs: Mutex::new(HashMap::new()),
+ claimable_htlcs: Mutex::new(HashMap::new()),
id_to_peer: Mutex::new(HashMap::new()),
+ short_to_chan_info: FairRwLock::new(HashMap::new()),
our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
///
/// `user_channel_id` will be provided back as in
/// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
- /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0
- /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here.
- /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise
- /// ignored.
+ /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to a
+ /// randomized value for inbound channels. `user_channel_id` has no meaning inside of LDK; it
+ /// is simply copied to events and otherwise ignored.
///
/// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
/// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
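+ ///
+ /// A minimal sketch (illustrative only; assumes `channel_manager` is in scope and
+ /// `their_network_key` is the pubkey of an already-connected peer):
+ ///
+ /// ```ignore
+ /// // Any u128 works; it is only echoed back in events so we can correlate them.
+ /// let user_channel_id: u128 = 42;
+ /// let temporary_channel_id =
+ ///     channel_manager.create_channel(their_network_key, 1_000_000, 0, user_channel_id, None)?;
+ /// ```
+ ///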
/// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
/// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
/// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
- pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
+ pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
if channel_value_satoshis < 1000 {
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
let mut res = Vec::new();
{
let channel_state = self.channel_state.lock().unwrap();
+ let best_block_height = self.best_block.read().unwrap().height();
res.reserve(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
let balance = channel.get_available_balances();
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
user_channel_id: channel.get_user_id(),
confirmations_required: channel.minimum_depth(),
+ confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
is_outbound: channel.is_outbound(),
is_channel_ready: channel.is_usable(),
if *counterparty_node_id != chan_entry.get().get_counterparty_node_id() {
return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
}
- let per_peer_state = self.per_peer_state.read().unwrap();
- let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
- Some(peer_state) => {
- let peer_state = peer_state.lock().unwrap();
- let their_features = &peer_state.latest_features;
- chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
- },
- None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ let (shutdown_msg, monitor_update, htlcs) = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ match per_peer_state.get(&counterparty_node_id) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ }
};
failed_htlcs = htlcs;
if let Some(monitor_update) = monitor_update {
let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
if is_permanent {
- remove_channel!(self, channel_state, chan_entry);
+ remove_channel!(self, chan_entry);
break result;
}
}
});
if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, channel_state, chan_entry);
+ let channel = remove_channel!(self, chan_entry);
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: channel_update
} else {
self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
}
- remove_channel!(self, channel_state, chan)
+ remove_channel!(self, chan)
} else {
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
}
let routing = match hop_data.format {
- msgs::OnionHopDataFormat::Legacy { .. } => {
- return Err(ReceiveError {
- err_code: 0x4000|0x2000|3,
- err_data: Vec::new(),
- msg: "We require payment_secrets",
- });
- },
msgs::OnionHopDataFormat::NonFinalNode { .. } => {
return Err(ReceiveError {
err_code: 0x4000|22,
routing,
payment_hash,
incoming_shared_secret: shared_secret,
- amt_to_forward: amt_msat,
+ incoming_amt_msat: Some(amt_msat),
+ outgoing_amt_msat: amt_msat,
outgoing_cltv_value: hop_data.outgoing_cltv_value,
})
}
};
let short_channel_id = match next_hop_data.format {
- msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::FinalNode { .. } => {
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
},
payment_hash: msg.payment_hash.clone(),
incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.amt_to_forward,
+ incoming_amt_msat: Some(msg.amount_msat),
+ outgoing_amt_msat: next_hop_data.amt_to_forward,
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
})
}
};
- if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
// with a short_channel_id of 0. This is important as various things later assume
// short_channel_id is non-0 in any ::Forward.
if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
if let Some((err, code, chan_update)) = loop {
+ let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
let mut channel_state = self.channel_state.lock().unwrap();
- let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned();
let forwarding_id_opt = match id_option {
None => { // unknown_next_peer
// Note that this is likely a timing oracle for detecting whether an scid is a
// phantom.
- if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id) {
+ if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) {
None
} else {
break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
Some((_cp_id, chan_id)) => Some(chan_id.clone()),
};
let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt {
- let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap();
+ let chan = match channel_state.by_id.get_mut(&forwarding_id) {
+ None => {
+ // Channel was removed. The short_to_chan_info and by_id maps have
+ // no consistency guarantees.
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ },
+ Some(chan) => chan
+ };
if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
// Note that the behavior here should be identical to the above block - we
// should NOT reveal the existence or non-existence of a private channel if
if !chan.is_live() { // channel_disabled
break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, chan_update_opt));
}
- if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+ if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
}
- if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *amt_to_forward, *outgoing_cltv_value) {
+ if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
break Some((err, code, chan_update_opt));
}
chan_update_opt
}
// Only public for testing; this should otherwise never be called directly
- pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
+ pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.keys_manager.get_secure_random_bytes();
- let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let err: Result<(), _> = loop {
- let mut channel_lock = self.channel_state.lock().unwrap();
-
- let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
- let payment_entry = pending_outbounds.entry(payment_id);
- if let hash_map::Entry::Occupied(payment) = &payment_entry {
- if !payment.get().is_retryable() {
- return Err(APIError::RouteError {
- err: "Payment already completed"
- });
- }
- }
-
- let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) {
+ let id = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
Some((_cp_id, chan_id)) => chan_id.clone(),
};
- macro_rules! insert_outbound_payment {
- () => {
- let payment = payment_entry.or_insert_with(|| PendingOutboundPayment::Retryable {
- session_privs: HashSet::new(),
- pending_amt_msat: 0,
- pending_fee_msat: Some(0),
- payment_hash: *payment_hash,
- payment_secret: *payment_secret,
- starting_block_height: self.best_block.read().unwrap().height(),
- total_msat: total_value,
- });
- assert!(payment.insert(session_priv_bytes, path));
- }
- }
-
+ let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
match {
payment_secret: payment_secret.clone(),
payment_params: payment_params.clone(),
}, onion_packet, &self.logger),
- channel_state, chan)
+ chan)
} {
Some((update_add, commitment_signed, monitor_update)) => {
let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
let chan_id = chan.get().channel_id();
match (update_err,
- handle_monitor_update_res!(self, update_err, channel_state, chan,
+ handle_monitor_update_res!(self, update_err, chan,
RAACommitmentOrder::CommitmentFirst, false, true))
{
(ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
- (ChannelMonitorUpdateStatus::Completed, Ok(())) => {
- insert_outbound_payment!();
- },
+ (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
(ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
// Note that MonitorUpdateInProgress here indicates (per function
// docs) that we will resend the commitment update once monitor
// indicating that it is unsafe to retry the payment wholesale,
// which we do in the send_payment check for
// MonitorUpdateInProgress, below.
- insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
return Err(APIError::MonitorUpdateInProgress);
},
_ => unreachable!(),
},
});
},
- None => { insert_outbound_payment!(); },
+ None => { },
}
- } else { unreachable!(); }
+ } else {
+ // The channel was likely removed after we fetched the id from the
+ // `short_to_chan_info` map, but before we successfully locked the `by_id` map.
+ // This can occur as no consistency guarantees exist between the two maps.
+ return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
+ }
return Ok(());
};
/// Value parameters are provided via the last hop in route; see the documentation for RouteHop
/// fields for more info.
///
- /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
- /// payment), we don't do anything to stop you! We always try to ensure that if the provided
- /// next hop knows the preimage to payment_hash they can claim an additional amount as
- /// specified in the last hop in the route! Thus, you should probably do your own
- /// payment_preimage tracking (which you should already be doing as they represent "proof of
- /// payment") and prevent double-sends yourself.
+ /// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
+ /// method will error with [`PaymentSendFailure::DuplicatePayment`]. Note, however, that once a payment
+ /// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
+ /// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
+ /// [`PaymentId`].
+ ///
+ /// Thus, in order to ensure duplicate payments are not sent, you should implement your own
+ /// tracking of payments, including state to indicate once a payment has completed. Because you
+ /// should also ensure that [`PaymentHash`]es are not re-used, for simplicity, you should
+ /// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the
+ /// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
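+ ///
+ /// A minimal sketch of that pattern (illustrative only; assumes `channel_manager`, `route`,
+ /// `payment_hash`, and `payment_secret` are in scope, e.g. parsed from an invoice):
+ ///
+ /// ```ignore
+ /// // Copying the payment hash bytes into the PaymentId gives send-side idempotency.
+ /// let payment_id = PaymentId(payment_hash.0);
+ /// match channel_manager.send_payment(&route, payment_hash, &Some(payment_secret), payment_id) {
+ ///     Ok(()) => {}, // HTLCs in flight; wait for Event::PaymentSent or Event::PaymentFailed
+ ///     Err(PaymentSendFailure::DuplicatePayment) => {}, // already pending under this id
+ ///     Err(e) => { /* parameter or path errors; fix and resend, or abandon */ },
+ /// }
+ /// ```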
///
- /// May generate SendHTLCs message(s) event on success, which should be relayed.
+ /// May generate SendHTLCs message(s) on success, which should be relayed (e.g. via
+ /// [`PeerManager::process_events`]).
///
/// Each path may have a different return value, and PaymentSendFailure may return a Vec with
/// each entry matching the corresponding-index entry in the route paths; see
/// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route
/// must not contain multiple paths as multi-path payments require a recipient-provided
/// payment_secret.
+ ///
/// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
/// bit set (either as required or as available). If multiple paths are present in the Route,
/// we assume the invoice had the basic_mpp feature set.
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
- self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
+ ///
+ /// [`Event::PaymentSent`]: events::Event::PaymentSent
+ /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+ pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+ let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret, payment_id, route)?;
+ self.send_payment_internal(route, payment_hash, payment_secret, None, payment_id, None, onion_session_privs)
+ }
+
+ #[cfg(test)]
+ pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
+ self.add_new_pending_payment(payment_hash, payment_secret, payment_id, route)
}
- fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
+ fn add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
+ let mut onion_session_privs = Vec::with_capacity(route.paths.len());
+ for _ in 0..route.paths.len() {
+ onion_session_privs.push(self.keys_manager.get_secure_random_bytes());
+ }
+
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ match pending_outbounds.entry(payment_id) {
+ hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
+ hash_map::Entry::Vacant(entry) => {
+ let payment = entry.insert(PendingOutboundPayment::Retryable {
+ session_privs: HashSet::new(),
+ pending_amt_msat: 0,
+ pending_fee_msat: Some(0),
+ payment_hash,
+ payment_secret,
+ starting_block_height: self.best_block.read().unwrap().height(),
+ total_msat: route.get_total_amount(),
+ });
+
+ for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+ assert!(payment.insert(*session_priv_bytes, path));
+ }
+
+ Ok(onion_session_privs)
+ },
+ }
+ }
+
+ fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
if route.paths.len() < 1 {
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
}
let mut total_value = 0;
let our_node_id = self.get_our_node_id();
let mut path_errs = Vec::with_capacity(route.paths.len());
- let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
'path_check: for path in route.paths.iter() {
if path.len() < 1 || path.len() > 20 {
path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
let cur_height = self.best_block.read().unwrap().height() + 1;
let mut results = Vec::new();
- for path in route.paths.iter() {
- results.push(self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
+ debug_assert_eq!(route.paths.len(), onion_session_privs.len());
+ for (path, session_priv) in route.paths.iter().zip(onion_session_privs.into_iter()) {
+ let mut path_res = self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv);
+ match path_res {
+ Ok(_) => {},
+ Err(APIError::MonitorUpdateInProgress) => {
+ // While a MonitorUpdateInProgress is an Err(_), the payment is still
+ // considered "in flight" and we shouldn't remove it from the
+ // PendingOutboundPayment set.
+ },
+ Err(_) => {
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let Some(payment) = pending_outbounds.get_mut(&payment_id) {
+ let removed = payment.remove(&session_priv, Some(path));
+ debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers");
+ } else {
+ debug_assert!(false, "This can't happen as the payment was added by callers");
+ path_res = Err(APIError::APIMisuseError { err: "Internal error: payment disappeared during processing. Please report this bug!".to_owned() });
+ }
+ }
+ }
+ results.push(path_res);
}
let mut has_ok = false;
let mut has_err = false;
} else { None },
})
} else if has_err {
- // If we failed to send any paths, we shouldn't have inserted the new PaymentId into
- // our `pending_outbound_payments` map at all.
- debug_assert!(self.pending_outbound_payments.lock().unwrap().get(&payment_id).is_none());
- Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
+ // If we failed to send any paths, we should remove the new PaymentId from the
+ // `pending_outbound_payments` map, as the user isn't expected to `abandon_payment`.
+ let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some();
+ debug_assert!(removed, "We should always have a pending payment to remove here");
+ Err(PaymentSendFailure::AllFailedResendSafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
} else {
- Ok(payment_id)
+ Ok(())
}
}
}
}
+ let mut onion_session_privs = Vec::with_capacity(route.paths.len());
+ for _ in 0..route.paths.len() {
+ onion_session_privs.push(self.keys_manager.get_secure_random_bytes());
+ }
+
let (total_msat, payment_hash, payment_secret) = {
- let outbounds = self.pending_outbound_payments.lock().unwrap();
- if let Some(payment) = outbounds.get(&payment_id) {
- match payment {
- PendingOutboundPayment::Retryable {
- total_msat, payment_hash, payment_secret, pending_amt_msat, ..
- } => {
- let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
- if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ match outbounds.get_mut(&payment_id) {
+ Some(payment) => {
+ let res = match payment {
+ PendingOutboundPayment::Retryable {
+ total_msat, payment_hash, payment_secret, pending_amt_msat, ..
+ } => {
+ let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
+ if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
+ }))
+ }
+ (*total_msat, *payment_hash, *payment_secret)
+ },
+ PendingOutboundPayment::Legacy { .. } => {
return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
+ err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
}))
- }
- (*total_msat, *payment_hash, *payment_secret)
- },
- PendingOutboundPayment::Legacy { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
- }))
- },
- PendingOutboundPayment::Fulfilled { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Payment already completed".to_owned()
- }));
- },
- PendingOutboundPayment::Abandoned { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Payment already abandoned (with some HTLCs still pending)".to_owned()
- }));
- },
- }
- } else {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
- }))
+ },
+ PendingOutboundPayment::Fulfilled { .. } => {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "Payment already completed".to_owned()
+ }));
+ },
+ PendingOutboundPayment::Abandoned { .. } => {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "Payment already abandoned (with some HTLCs still pending)".to_owned()
+ }));
+ },
+ };
+ for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+ assert!(payment.insert(*session_priv_bytes, path));
+ }
+ res
+ },
+ None =>
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
+ })),
}
};
- return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
+ self.send_payment_internal(route, payment_hash, &payment_secret, None, payment_id, Some(total_msat), onion_session_privs)
}
/// Signals that no further retries for the given payment will occur.
/// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
/// never reach the recipient.
///
- /// See [`send_payment`] documentation for more details on the return value of this function.
+ /// See [`send_payment`] documentation for more details on the return value of this function
+ /// and idempotency guarantees provided by the [`PaymentId`] key.
///
/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
/// [`send_payment`] for more information about the risks of duplicate preimage usage.
/// Note that `route` must have exactly one path.
///
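+ /// A minimal sketch (illustrative only; assumes `route` has exactly one path and that
+ /// `keys_manager` implements [`KeysInterface`]):
+ ///
+ /// ```ignore
+ /// // Passing `None` lets LDK draw a secure random preimage for us.
+ /// let payment_id = PaymentId(keys_manager.get_secure_random_bytes());
+ /// let payment_hash = channel_manager.send_spontaneous_payment(&route, None, payment_id)?;
+ /// ```
+ ///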
/// [`send_payment`]: Self::send_payment
- pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+ pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
let preimage = match payment_preimage {
Some(p) => p,
None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
};
let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
- match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
- Ok(payment_id) => Ok((payment_hash, payment_id)),
+ let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?;
+
+ match self.send_payment_internal(route, payment_hash, &None, Some(preimage), payment_id, None, onion_session_privs) {
+ Ok(()) => Ok(payment_hash),
Err(e) => Err(e)
}
}
}
let route = Route { paths: vec![hops], payment_params: None };
+ let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?;
- match self.send_payment_internal(&route, payment_hash, &None, None, Some(payment_id), None) {
- Ok(payment_id) => Ok((payment_hash, payment_id)),
+ match self.send_payment_internal(&route, payment_hash, &None, None, payment_id, None, onion_session_privs) {
+ Ok(()) => Ok((payment_hash, payment_id)),
Err(e) => Err(e)
}
}
let mut new_events = Vec::new();
let mut failed_forwards = Vec::new();
- let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+ let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
let mut handle_errors = Vec::new();
{
let mut forward_htlcs = HashMap::new();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for (short_chan_id, mut pending_forwards) in forward_htlcs {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
if short_chan_id != 0 {
- let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) {
- Some((_cp_id, chan_id)) => chan_id.clone(),
- None => {
+ macro_rules! forwarding_channel_not_found {
+ () => {
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
- prev_funding_outpoint } => {
- macro_rules! failure_handler {
- ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- phantom_shared_secret: $phantom_ss,
- });
-
- let reason = if $next_hop_unknown {
- HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
- } else {
- HTLCDestination::FailedPayment{ payment_hash }
- };
-
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
- reason
- ));
- continue;
- }
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
+ forward_info: PendingHTLCInfo {
+ routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
+ outgoing_cltv_value, incoming_amt_msat: _
+ }
+ }) => {
+ macro_rules! failure_handler {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+ log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ phantom_shared_secret: $phantom_ss,
+ });
+
+ let reason = if $next_hop_unknown {
+ HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+ } else {
+ HTLCDestination::FailedPayment{ payment_hash }
+ };
+
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+ reason
+ ));
+ continue;
}
- macro_rules! fail_forward {
- ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
- {
- failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
- }
+ }
+ macro_rules! fail_forward {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+ {
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
}
}
- macro_rules! failed_payment {
- ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
- {
- failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
- }
+ }
+ macro_rules! failed_payment {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+ {
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
}
}
- if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
- let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
- if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
- let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
- let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
- Ok(res) => res,
- Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
- let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
- // In this scenario, the phantom would have sent us an
- // `update_fail_malformed_htlc`, meaning here we encrypt the error as
- // if it came from us (the second-to-last hop) but contains the sha256
- // of the onion.
- failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
- },
- Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
- failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
- },
- };
- match next_hop {
- onion_utils::Hop::Receive(hop_data) => {
- match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
- Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
- Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
- }
- },
- _ => panic!(),
- }
- } else {
- fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
+ }
+ if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
+ let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
+ if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
+ let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
+ let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+ Ok(res) => res,
+ Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
+ let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
+ // In this scenario, the phantom would have sent us an
+ // `update_fail_malformed_htlc`, meaning here we encrypt the error as
+ // if it came from us (the second-to-last hop), but it contains the
+ // sha256 of the onion.
+ failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+ },
+ Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
+ failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+ },
+ };
+ match next_hop {
+ onion_utils::Hop::Receive(hop_data) => {
+ match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) {
+ Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+ Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+ }
+ },
+ _ => panic!(),
}
} else {
fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
}
- },
+ } else {
+ fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
+ }
+ },
HTLCForwardInfo::FailHTLC { .. } => {
// Channel went away before we could fail it. This implies
// the channel is now on chain and our counterparty is
}
}
}
+ }
+ }
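// Editor's sketch (not part of this patch): the phantom-node ECDH performed
// inside the macro above, assuming the secp256k1 crate this file depends on.
// The onion's ephemeral pubkey and the phantom node secret yield the shared
// secret used to peel one additional onion layer.
//
// use secp256k1::{ecdh::SharedSecret, PublicKey, SecretKey};
// fn phantom_ecdh(onion_ephemeral: &PublicKey, phantom_secret: &SecretKey) -> [u8; 32] {
//     SharedSecret::new(onion_ephemeral, phantom_secret).secret_bytes()
// }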
+ let forward_chan_id = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
+ Some((_cp_id, chan_id)) => chan_id.clone(),
+ None => {
+ forwarding_channel_not_found!();
continue;
}
};
- if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
- let mut add_htlc_msgs = Vec::new();
- let mut fail_htlc_msgs = Vec::new();
- for forward_info in pending_forwards.drain(..) {
- match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing: PendingHTLCRouting::Forward {
- onion_packet, ..
- }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
- prev_funding_outpoint } => {
- log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- // Phantom payments are only PendingHTLCRouting::Receive.
- phantom_shared_secret: None,
- });
- match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
- } else {
- panic!("Stated return value requirements in send_htlc() were not met");
- }
- let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code, data },
- HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
- ));
- continue;
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ match channel_state.by_id.entry(forward_chan_id) {
+ hash_map::Entry::Vacant(_) => {
+ forwarding_channel_not_found!();
+ continue;
+ },
+ hash_map::Entry::Occupied(mut chan) => {
+ let mut add_htlc_msgs = Vec::new();
+ let mut fail_htlc_msgs = Vec::new();
+ for forward_info in pending_forwards.drain(..) {
+ match forward_info {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
+ forward_info: PendingHTLCInfo {
+ incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+ routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _,
},
- Ok(update_add) => {
- match update_add {
- Some(msg) => { add_htlc_msgs.push(msg); },
- None => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can add anymore HTLCs. The Channel
- // will automatically handle building the update_add_htlc and
- // commitment_signed messages when we can.
- // TODO: Do some kind of timer to set the channel as !is_live()
- // as we don't really want others relying on us relaying through
- // this channel currently :/.
+ }) => {
+ log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ // Phantom payments are only PendingHTLCRouting::Receive.
+ phantom_shared_secret: None,
+ });
+ match chan.get_mut().send_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+ let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code, data },
+ HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+ ));
+ continue;
+ },
+ Ok(update_add) => {
+ match update_add {
+ Some(msg) => { add_htlc_msgs.push(msg); },
+ None => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can add any more HTLCs. The Channel
+ // will automatically handle building the update_add_htlc and
+ // commitment_signed messages when we can.
+ // TODO: Do some kind of timer to set the channel as !is_live()
+ // as we don't really want others relying on us relaying through
+ // this channel currently :/.
+ }
}
}
}
- }
- },
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
- log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK; the channel is on the chain
+ // and sending the HTLC-Timeout is their problem.
+ continue;
+ },
+ Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+ Ok(None) => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can update the commitment
+ // transaction. The Channel will automatically handle
+ // building the update_fail_htlc and commitment_signed
+ // messages when we can.
+ // We don't need any kind of timer here as they should fail
+ // the channel onto the chain if they can't get our
+ // update_fail_htlc in time, it's not our problem.
}
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- },
- Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
- Ok(None) => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can update the commitment
- // transaction. The Channel will automatically handle
- // building the update_fail_htlc and commitment_signed
- // messages when we can.
- // We don't need any kind of timer here as they should fail
- // the channel onto the chain if they can't get our
- // update_fail_htlc in time, it's not our problem.
}
- }
- },
+ },
+ }
}
- }
- if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
- let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
- Ok(res) => res,
- Err(e) => {
- // We surely failed send_commitment due to bad keys, in that case
- // close channel and then send error message to peer.
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- let err: Result<(), _> = match e {
- ChannelError::Ignore(_) | ChannelError::Warn(_) => {
- panic!("Stated return value requirements in send_commitment() were not met");
- }
- ChannelError::Close(msg) => {
- log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
- let mut channel = remove_channel!(self, channel_state, chan);
- // ChannelClosed event is generated by handle_error for us.
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
- },
- };
- handle_errors.push((counterparty_node_id, err));
- continue;
- }
- };
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
- continue;
+ if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+ let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
+ Ok(res) => res,
+ Err(e) => {
+ // send_commitment surely failed due to bad keys; in that case, close the
+ // channel and then send an error message to the peer.
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let err: Result<(), _> = match e {
+ ChannelError::Ignore(_) | ChannelError::Warn(_) => {
+ panic!("Stated return value requirements in send_commitment() were not met");
+ }
+ ChannelError::Close(msg) => {
+ log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
+ let mut channel = remove_channel!(self, chan);
+ // ChannelClosed event is generated by handle_error for us.
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
+ },
+ };
+ handle_errors.push((counterparty_node_id, err));
+ continue;
+ }
+ };
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+ continue;
+ }
}
+ log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
+ add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: add_htlc_msgs,
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: fail_htlc_msgs,
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed: commitment_msg,
+ },
+ });
}
- log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
- add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: add_htlc_msgs,
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: fail_htlc_msgs,
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed: commitment_msg,
- },
- });
}
- } else {
- unreachable!();
}
} else {
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
- prev_funding_outpoint } => {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
+ forward_info: PendingHTLCInfo {
+ routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, ..
+ }
+ }) => {
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
let _legacy_hop_data = Some(payment_data.clone());
incoming_packet_shared_secret: incoming_shared_secret,
phantom_shared_secret,
},
- value: amt_to_forward,
+ value: outgoing_amt_msat,
timer_ticks: 0,
- total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward },
+ total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
cltv_expiry,
onion_payload,
};
));
}
}
+ let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
+ let mut receiver_node_id = self.our_network_pubkey;
+ if phantom_shared_secret.is_some() {
+ receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ }
macro_rules! check_total_value {
($payment_data: expr, $payment_preimage: expr) => {{
payment_secret: $payment_data.payment_secret,
}
};
- let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+ let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let (_, htlcs) = claimable_htlcs.entry(payment_hash)
.or_insert_with(|| (purpose(), Vec::new()));
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
fail_htlc!(claimable_htlc, payment_hash);
} else if total_value == $payment_data.total_msat {
+ let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
new_events.push(events::Event::PaymentReceived {
+ receiver_node_id: Some(receiver_node_id),
payment_hash,
purpose: purpose(),
amount_msat: total_value,
+ via_channel_id: Some(prev_channel_id),
+ via_user_channel_id: Some(prev_user_channel_id),
});
payment_received_generated = true;
} else {
check_total_value!(payment_data, payment_preimage);
},
OnionPayload::Spontaneous(preimage) => {
- match channel_state.claimable_htlcs.entry(payment_hash) {
+ match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
+ let prev_channel_id = prev_funding_outpoint.to_channel_id();
new_events.push(events::Event::PaymentReceived {
+ receiver_node_id: Some(receiver_node_id),
payment_hash,
- amount_msat: amt_to_forward,
+ amount_msat: outgoing_amt_msat,
purpose,
+ via_channel_id: Some(prev_channel_id),
+ via_user_channel_id: Some(prev_user_channel_id),
});
},
hash_map::Entry::Occupied(_) => {
self.process_background_events();
}
- fn update_channel_fee(&self, short_to_chan_info: &mut HashMap<u64, (PublicKey, [u8; 32])>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+ fn update_channel_fee(&self, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
Ok(res) => Ok(res),
Err(e) => {
- let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
+ let (drop, res) = convert_chan_err!(self, e, chan, chan_id);
if drop { retain_channel = false; }
Err(res)
}
Ok(())
},
e => {
- let (res, drop) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+ let (res, drop) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
if drop { retain_channel = false; }
res
}
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
- let short_to_chan_info = &mut channel_state.short_to_chan_info;
channel_state.by_id.retain(|chan_id, chan| {
- let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if err.is_err() {
handle_errors.push(err);
});
}
+ fn remove_stale_resolved_payments(&self) {
+ // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
+ // from the map. However, if we did that immediately when the last payment HTLC is claimed,
+ // this could race with the user making a duplicate send_payment call, and our idempotency
+ // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
+ // removal. This should be more than sufficient to ensure the idempotency of any
+ // `send_payment` calls that were made at the same time the `PaymentSent` event was being
+ // processed.
+ let mut pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+ let pending_events = self.pending_events.lock().unwrap();
+ pending_outbound_payments.retain(|payment_id, payment| {
+ if let PendingOutboundPayment::Fulfilled { session_privs, timer_ticks_without_htlcs, .. } = payment {
+ let mut no_remaining_entries = session_privs.is_empty();
+ if no_remaining_entries {
+ for ev in pending_events.iter() {
+ match ev {
+ events::Event::PaymentSent { payment_id: Some(ev_payment_id), .. } |
+ events::Event::PaymentPathSuccessful { payment_id: ev_payment_id, .. } |
+ events::Event::PaymentPathFailed { payment_id: Some(ev_payment_id), .. } => {
+ if payment_id == ev_payment_id {
+ no_remaining_entries = false;
+ break;
+ }
+ },
+ _ => {},
+ }
+ }
+ }
+ if no_remaining_entries {
+ *timer_ticks_without_htlcs += 1;
+ *timer_ticks_without_htlcs <= IDEMPOTENCY_TIMEOUT_TICKS
+ } else {
+ *timer_ticks_without_htlcs = 0;
+ true
+ }
+ } else { true }
+ });
+ }
+
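// Editor's sketch (not part of this patch): the retention rule implemented
// above, in stand-alone form. A fulfilled payment with no remaining HTLCs
// (and no pending events referencing it) is kept for a grace period of timer
// ticks so duplicate send_payment calls still hit the idempotency check. The
// tick limit is passed in here; the real constant lives elsewhere in LDK.
//
// fn retain_fulfilled(no_remaining_entries: bool, ticks_without_htlcs: &mut u8, timeout_ticks: u8) -> bool {
//     if no_remaining_entries {
//         *ticks_without_htlcs += 1;
//         *ticks_without_htlcs <= timeout_ticks
//     } else {
//         *ticks_without_htlcs = 0;
//         true
//     }
// }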
/// Performs actions which should happen on startup and roughly once per minute thereafter.
///
/// This currently includes:
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
- let short_to_chan_info = &mut channel_state.short_to_chan_info;
channel_state.by_id.retain(|chan_id, chan| {
let counterparty_node_id = chan.get_counterparty_node_id();
- let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if err.is_err() {
handle_errors.push((err, counterparty_node_id));
if !retain_channel { return false; }
if let Err(e) = chan.timer_check_closing_negotiation_progress() {
- let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
+ let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
handle_errors.push((Err(err), chan.get_counterparty_node_id()));
if needs_close { return false; }
}
true
});
+ }
- channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- if htlcs.is_empty() {
- // This should be unreachable
- debug_assert!(false);
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ if htlcs.is_empty() {
+ // This should be unreachable
+ debug_assert!(false);
+ return false;
+ }
+ if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+ // Check if we've received all the parts we need for an MPP (the values of the parts add up to total_msat).
+ // In this case we're not going to handle any timeouts of the parts here.
+ if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+ return true;
+ } else if htlcs.into_iter().any(|htlc| {
+ htlc.timer_ticks += 1;
+ return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+ }) {
+ timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
return false;
}
- if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
- // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
- // In this case we're not going to handle any timeouts of the parts here.
- if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
- return true;
- } else if htlcs.into_iter().any(|htlc| {
- htlc.timer_ticks += 1;
- return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
- }) {
- timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
- return false;
- }
- }
- true
- });
- }
+ }
+ true
+ });
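// Editor's sketch (not part of this patch): the MPP completeness test above,
// with a stand-in part type. A set of HTLC parts is complete once their
// values add up to the total_msat each part committed to.
//
// struct Part { value: u64, total_msat: u64 }
// fn mpp_complete(parts: &[Part]) -> bool {
//     !parts.is_empty() && parts[0].total_msat == parts.iter().map(|p| p.value).sum::<u64>()
// }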
for htlc_source in timed_out_mpp_htlcs.drain(..) {
let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
for (err, counterparty_node_id) in handle_errors.drain(..) {
let _ = handle_error!(self, err, counterparty_node_id);
}
+
+ self.remove_stale_resolved_payments();
+
should_persist
});
}
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.claimable_htlcs.remove(payment_hash)
- };
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
if let Some((payment_purpose, mut sources)) = removed_source {
assert!(!sources.is_empty());
let mut claimed_any_htlcs = false;
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
+ let mut receiver_node_id = Some(self.our_network_pubkey);
for htlc in sources.iter() {
- if let None = channel_state.short_to_chan_info.get(&htlc.prev_hop.short_channel_id) {
+ let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
+ Some((_cp_id, chan_id)) => chan_id.clone(),
+ None => {
+ valid_mpp = false;
+ break;
+ }
+ };
+
+ if let None = channel_state.by_id.get(&chan_id) {
valid_mpp = false;
break;
}
+
if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
debug_assert!(false);
break;
}
}
+ let phantom_shared_secret = htlc.prev_hop.phantom_shared_secret;
+ if phantom_shared_secret.is_some() {
+ let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ receiver_node_id = Some(phantom_pubkey)
+ }
claimable_amt_msat += htlc.value;
}
if claimed_any_htlcs {
self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+ receiver_node_id,
payment_hash,
purpose: payment_purpose,
amount_msat: claimable_amt_msat,
fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
let channel_state = &mut **channel_state_lock;
- let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) {
+ let chan_id = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((_cp_id, chan_id)) => chan_id.clone(),
None => {
return ClaimFundsFromHop::PrevHopForceClosed
payment_preimage, e);
return ClaimFundsFromHop::MonitorUpdateFail(
chan.get().get_counterparty_node_id(),
- handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+ handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
Some(htlc_value_msat)
);
}
},
}
let counterparty_node_id = chan.get().get_counterparty_node_id();
- let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id);
+ let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
if drop {
chan.remove_entry();
}
return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
},
}
- } else { unreachable!(); }
+ } else { return ClaimFundsFromHop::PrevHopForceClosed }
}
fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
}
);
}
- if payment.get().remaining_parts() == 0 {
- payment.remove();
- }
}
}
}
}
);
}
-
- if payment.get().remaining_parts() == 0 {
- payment.remove();
- }
}
} else {
log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
self.our_network_pubkey.clone()
}
+ /// Handles a channel reentering a functional state, either due to reconnect or a monitor
+ /// update completion.
+ fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
+ channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
+ commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
+ pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+ channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
+ -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ let mut htlc_forwards = None;
+
+ let counterparty_node_id = channel.get_counterparty_node_id();
+ if !pending_forwards.is_empty() {
+ htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
+ channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+ }
+
+ if let Some(msg) = channel_ready {
+ send_channel_ready!(self, pending_msg_events, channel, msg);
+ }
+ if let Some(msg) = announcement_sigs {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: counterparty_node_id,
+ msg,
+ });
+ }
+
+ emit_channel_ready_event!(self, channel);
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = commitment_update {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id,
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = raa {
+ pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: counterparty_node_id,
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+
+ if let Some(tx) = funding_broadcastable {
+ log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+
+ htlc_forwards
+ }
+
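// Editor's sketch (not part of this patch): the message-ordering rule the
// handle_cs/handle_raa macros above implement. Depending on the negotiated
// order, the commitment update and revoke_and_ack are queued in opposite
// sequences; T stands in for the message types used in this file.
//
// enum Order { CommitmentFirst, RevokeAndAckFirst }
// fn queue_in_order<T>(order: Order, commitment: Option<T>, raa: Option<T>, out: &mut Vec<T>) {
//     match order {
//         Order::CommitmentFirst => { out.extend(commitment); out.extend(raa); },
//         Order::RevokeAndAckFirst => { out.extend(raa); out.extend(commitment); },
//     }
// }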
fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let chan_restoration_res;
+ let htlc_forwards;
let (mut pending_failures, finalized_claims, counterparty_node_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
})
} else { None }
} else { None };
- chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+ htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
(updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
- post_handle_chan_restoration!(self, chan_restoration_res);
+ if let Some(forwards) = htlc_forwards {
+ self.forward_htlcs(&mut [forwards][..]);
+ }
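// Editor's sketch (not part of this patch): the `&mut [forwards][..]` idiom
// above. A one-element array is built on the stack and reborrowed as a
// mutable slice, matching forward_htlcs' slice-taking signature.
//
// fn takes_slice(items: &mut [u32]) {
//     for item in items.iter_mut() { *item += 1; }
// }
// fn call_with_one(value: u32) {
//     takes_slice(&mut [value][..]);
// }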
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
///
/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
- pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+ pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
}
///
/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
- pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+ pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
}
- fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u64) -> Result<(), APIError> {
+ fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state_lock = self.channel_state.lock().unwrap();
}
};
channel_state.pending_msg_events.push(send_msg_err_event);
- let _ = remove_channel!(self, channel_state, channel);
+ let _ = remove_channel!(self, channel);
return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
}
+ let mut random_bytes = [0u8; 16];
+ random_bytes.copy_from_slice(&self.keys_manager.get_secure_random_bytes()[..16]);
+ let user_channel_id = u128::from_be_bytes(random_bytes);
+
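// Editor's sketch (not part of this patch): the conversion above in
// stand-alone form. Sixteen of the thirty-two secure random bytes become a
// big-endian u128 user_channel_id; `random32` stands in for the output of
// keys_manager.get_secure_random_bytes().
//
// fn random_user_channel_id(random32: [u8; 32]) -> u128 {
//     let mut random_bytes = [0u8; 16];
//     random_bytes.copy_from_slice(&random32[..16]);
//     u128::from_be_bytes(random_bytes)
// }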
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager,
- counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration,
+ counterparty_node_id.clone(), &their_features, msg, user_channel_id, &self.default_configuration,
self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
{
Err(e) => {
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
node_id: counterparty_node_id.clone(),
- msg: channel.accept_inbound_channel(0),
+ msg: channel.accept_inbound_channel(user_channel_id),
});
} else {
let mut pending_events = self.pending_events.lock().unwrap();
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan);
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove())
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
}
msg: funding_msg,
});
if let Some(msg) = channel_ready {
- send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg);
+ send_channel_ready!(self, channel_state.pending_msg_events, chan, msg);
}
e.insert(chan);
}
}
let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
Ok(update) => update,
- Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
+ Err(e) => try_chan_entry!(self, Err(e), chan),
};
match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
ChannelMonitorUpdateStatus::Completed => {},
e => {
- let mut res = handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+ let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
// We weren't able to watch the channel to begin with, so no updates should be made on
// it. Previously, full_stack_target found an (unreachable) panic when the
},
}
if let Some(msg) = channel_ready {
- send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg);
+ send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
}
funding_tx
},
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
- self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan);
+ self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan);
if let Some(announcement_sigs) = announcement_sigs_opt {
log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
}
- let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+ let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
dropped_htlcs = htlcs;
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update {
let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
if is_permanent {
- remove_channel!(self, channel_state, chan_entry);
+ remove_channel!(self, chan_entry);
break result;
}
}
if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
+ let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
if let Some(msg) = closing_signed {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
// also implies there are no pending HTLCs left on the channel, so we can
// fully delete it from tracking (the channel monitor is still around to
// watch for old state broadcasts)!
- (tx, Some(remove_channel!(self, channel_state, chan_entry)))
+ (tx, Some(remove_channel!(self, chan_entry)))
} else { (tx, None) }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
_ => pending_forward_info
}
};
- try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
+ try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
}
if (msg.failure_code & 0x8000) == 0 {
let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
- try_chan_entry!(self, Err(chan_err), channel_state, chan);
+ try_chan_entry!(self, Err(chan_err), chan);
}
- try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), chan);
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
let (revoke_and_ack, commitment_signed, monitor_update) =
match chan.get_mut().commitment_signed(&msg, &self.logger) {
- Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
+ Err((None, e)) => try_chan_entry!(self, Err(e), chan),
Err((Some(update), e)) => {
assert!(chan.get().is_awaiting_monitor_update());
let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
- try_chan_entry!(self, Err(e), channel_state, chan);
+ try_chan_entry!(self, Err(e), chan);
unreachable!();
},
Ok(res) => res
};
let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
- if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+ if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
return Err(e);
}
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)]) {
- for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
PendingHTLCRouting::ReceiveKeysend { .. } => 0,
}) {
hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
- prev_htlc_id, forward_info });
+ entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
},
hash_map::Entry::Vacant(entry) => {
- entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
- prev_htlc_id, forward_info }));
+ entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
}
}
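// Editor's sketch (not part of this patch): the per-SCID queueing in
// forward_htlcs above, reduced to a helper. The explicit Occupied/Vacant
// match mirrors the surrounding code's style; functionally it is equivalent
// to the entry/or_insert_with idiom sketched here.
//
// use std::collections::HashMap;
// fn enqueue_forward<T>(map: &mut HashMap<u64, Vec<T>>, scid: u64, item: T) {
//     map.entry(scid).or_insert_with(Vec::new).push(item);
// }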
let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
let raa_updates = break_chan_entry!(self,
- chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
+ chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
if was_paused_for_mon_update {
break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
}
if update_res != ChannelMonitorUpdateStatus::Completed {
- if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan,
+ if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
RAACommitmentOrder::CommitmentFirst, false,
raa_updates.commitment_update.is_some(), false,
raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
raa_updates.finalized_claimed_htlcs,
chan.get().get_short_channel_id()
.unwrap_or(chan.get().outbound_scid_alias()),
- chan.get().get_funding_txo().unwrap()))
+ chan.get().get_funding_txo().unwrap(),
+ chan.get().get_user_id()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
match res {
Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
- short_channel_id, channel_outpoint)) =>
+ short_channel_id, channel_outpoint, user_channel_id)) =>
{
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
- self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
+ self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]);
self.finalize_claims(finalized_claim_htlcs);
Ok(())
},
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
- self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan),
+ self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
// Note that announcement_signatures fails if the channel cannot be announced,
// so get_channel_update_for_broadcast will never fail by the time we get here.
update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
/// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err.
fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) {
+ let chan_id = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
Some((_cp_id, chan_id)) => chan_id.clone(),
None => {
// It's not a local channel
return Ok(NotifyOption::SkipPersist)
}
};
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan) => {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Ok(NotifyOption::SkipPersist);
} else {
log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
- try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
}
},
- hash_map::Entry::Vacant(_) => unreachable!()
+ hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
}
Ok(NotifyOption::DoPersist)
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let chan_restoration_res;
- let (htlcs_failed_forward, need_lnd_workaround) = {
+ let htlc_forwards;
+ let need_lnd_workaround = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
- &*self.best_block.read().unwrap()), channel_state, chan);
+ &*self.best_block.read().unwrap()), chan);
let mut channel_update = None;
if let Some(msg) = responses.shutdown_msg {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
}
}
let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
- chan_restoration_res = handle_chan_restoration_locked!(
- self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
- responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ htlc_forwards = self.handle_channel_resumption(
+ &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+ Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- (responses.holding_cell_failed_htlcs, need_lnd_workaround)
+ need_lnd_workaround
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- post_handle_chan_restoration!(self, chan_restoration_res);
- self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
+
+ if let Some(forwards) = htlc_forwards {
+ self.forward_htlcs(&mut [forwards][..]);
+ }
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
let by_id = &mut channel_state.by_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
- let mut chan = remove_channel!(self, channel_state, chan_entry);
+ let mut chan = remove_channel!(self, chan_entry);
failed_channels.push(chan.force_shutdown(false));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let by_id = &mut channel_state.by_id;
- let short_to_chan_info = &mut channel_state.short_to_chan_info;
let pending_msg_events = &mut channel_state.pending_msg_events;
by_id.retain(|channel_id, chan| {
},
e => {
has_monitor_update = true;
- let (res, close_channel) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+ let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
handle_errors.push((chan.get_counterparty_node_id(), res));
if close_channel { return false; }
},
true
},
Err(e) => {
- let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
+ let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
// ChannelClosed event is generated by handle_error for us
!close_channel
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let by_id = &mut channel_state.by_id;
- let short_to_chan_info = &mut channel_state.short_to_chan_info;
let pending_msg_events = &mut channel_state.pending_msg_events;
by_id.retain(|channel_id, chan| {
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transaction(&tx);
- update_maps_on_chan_removal!(self, short_to_chan_info, chan);
+ update_maps_on_chan_removal!(self, chan);
false
} else { true }
},
Err(e) => {
has_update = true;
- let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
+ let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
!close_channel
}
///
/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
- let mut channel_state = self.channel_state.lock().unwrap();
- let best_block = self.best_block.read().unwrap();
+ let best_block_height = self.best_block.read().unwrap().height();
+ let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+ let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
// Ensure the generated scid doesn't conflict with a real channel.
- match channel_state.short_to_chan_info.entry(scid_candidate) {
- hash_map::Entry::Occupied(_) => continue,
- hash_map::Entry::Vacant(_) => return scid_candidate
+ match short_to_chan_info.get(&scid_candidate) {
+ Some(_) => continue,
+ None => return scid_candidate
}
}
}
}
}
+ /// Gets in-flight HTLC information by processing the pending outbound payments in our
+ /// channels. May be used during pathfinding to account for in-use channel liquidity.
+ pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
+ let mut inflight_htlcs = InFlightHtlcs::new();
+
+ for chan in self.channel_state.lock().unwrap().by_id.values() {
+ for htlc_source in chan.inflight_htlc_sources() {
+ if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
+ inflight_htlcs.process_path(path, self.get_our_node_id());
+ }
+ }
+ }
+
+ inflight_htlcs
+ }
+
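// A caller-side sketch of the API above (hypothetical bindings: a running `channel_manager`,
// `source_node_id`/`target_node_id: NodeId`, and a candidate hop's `short_channel_id: u64`).
// The snapshot is point-in-time, so recompute it before each pathfinding run.
let inflight_htlcs = channel_manager.compute_inflight_htlcs();
// Ask how many msats our pending HTLCs already commit on the directed channel, so a
// scorer can discount that liquidity when evaluating the hop.
let used_msat = inflight_htlcs
	.used_liquidity_msat(&source_node_id, &target_node_id, short_channel_id)
	.unwrap_or(0);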
#[cfg(any(test, fuzzing, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let events = core::cell::RefCell::new(Vec::new());
- let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
+ let event_handler = |event: events::Event| events.borrow_mut().push(event);
self.process_pending_events(&event_handler);
events.into_inner()
}
pub fn clear_pending_payments(&self) {
self.pending_outbound_payments.lock().unwrap().clear()
}
+
+ /// Processes any events asynchronously in the order they were generated since the last call
+ /// using the given event handler.
+ ///
+ /// See the trait-level documentation of [`EventsProvider`] for requirements.
+ pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
+ &self, handler: H
+ ) {
+ // We'll hold our total consistency lock until the returned future completes so that
+ // we can be sure no other persists happen while processing events.
+ let _read_guard = self.total_consistency_lock.read().unwrap();
+
+ let mut result = NotifyOption::SkipPersist;
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ for event in pending_events {
+ handler(event).await;
+ }
+
+ if result == NotifyOption::DoPersist {
+ self.persistence_notifier.notify();
+ }
+ }
}
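// A runtime-side sketch of driving the async entry point above (`handle_event_async` is a
// hypothetical `async fn(Event)` supplied by the application):
channel_manager.process_pending_events_async(|event| async move {
	handle_event_async(event).await;
}).await;
// Because the total consistency lock is held until the returned future completes,
// persisting (serializing) the ChannelManager from inside the handler may deadlock.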
impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, L>
result = NotifyOption::DoPersist;
}
- let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
if !pending_events.is_empty() {
result = NotifyOption::DoPersist;
}
- for event in pending_events.drain(..) {
- handler.handle_event(&event);
+ for event in pending_events {
+ handler.handle_event(event);
}
result
payment_secrets.retain(|_, inbound_payment| {
inbound_payment.expiry_time > header.time as u64
});
-
- let mut outbounds = self.pending_outbound_payments.lock().unwrap();
- let mut pending_events = self.pending_events.lock().unwrap();
- outbounds.retain(|payment_id, payment| {
- if payment.remaining_parts() != 0 { return true }
- if let PendingOutboundPayment::Retryable { starting_block_height, payment_hash, .. } = payment {
- if *starting_block_height + PAYMENT_EXPIRY_BLOCKS <= height {
- log_info!(self.logger, "Timing out payment with id {} and hash {}", log_bytes!(payment_id.0), log_bytes!(payment_hash.0));
- pending_events.push(events::Event::PaymentFailed {
- payment_id: *payment_id, payment_hash: *payment_hash,
- });
- false
- } else { true }
- } else { true }
- });
}
- fn get_relevant_txids(&self) -> Vec<Txid> {
+ fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
let channel_state = self.channel_state.lock().unwrap();
- let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len());
+ let mut res = Vec::with_capacity(channel_state.by_id.len());
for chan in channel_state.by_id.values() {
- if let Some(funding_txo) = chan.get_funding_txo() {
- res.push(funding_txo.txid);
+ if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+ res.push((funding_txo.txid, block_hash));
}
}
res
{
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
- let short_to_chan_info = &mut channel_state.short_to_chan_info;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, channel| {
let res = f(channel);
}, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
}
if let Some(channel_ready) = channel_ready_opt {
- send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
+ send_channel_ready!(self, pending_msg_events, channel, channel_ready);
if channel.is_usable() {
log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
// enforce option_scid_alias then), and if the funding tx is ever
// un-confirmed we force-close the channel, ensuring short_to_chan_info
// is always consistent.
+ let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
}
}
} else if let Err(reason) = res {
- update_maps_on_chan_removal!(self, short_to_chan_info, channel);
+ update_maps_on_chan_removal!(self, channel);
// It looks like our counterparty went on-chain or funding transaction was
// reorged out of the main chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
}
true
});
+ }
- if let Some(height) = height_opt {
- channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
- htlcs.retain(|htlc| {
- // If height is approaching the number of blocks we think it takes us to get
- // our commitment transaction confirmed before the HTLC expires, plus the
- // number of blocks we generally consider it to take to do a commitment update,
- // just give up on it and fail the HTLC.
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-
- timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
- failure_code: 0x4000 | 15,
- data: htlc_msat_height_data
- }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
- false
- } else { true }
- });
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ if let Some(height) = height_opt {
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ htlcs.retain(|htlc| {
+ // If height is approaching the number of blocks we think it takes us to get
+ // our commitment transaction confirmed before the HTLC expires, plus the
+ // number of blocks we generally consider it to take to do a commitment update,
+ // just give up on it and fail the HTLC.
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
+ timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+ failure_code: 0x4000 | 15,
+ data: htlc_msat_height_data
+ }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+ false
+ } else { true }
});
- }
+ !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ });
}
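// Worked sketch of the failure payload assembled above: incorrect_or_unknown_payment_details
// (0x4000 | 15) carries the HTLC amount as a big-endian u64 followed by the failing height as
// a big-endian u32; core's to_be_bytes is equivalent to the byte_utils helpers used here.
let mut data = 5_000u64.to_be_bytes().to_vec();    // amount_msat -> [0, 0, 0, 0, 0, 0, 0x13, 0x88]
data.extend_from_slice(&100_000u32.to_be_bytes()); // height      -> [0, 0x01, 0x86, 0xA0]
assert_eq!(data.len(), 12);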
self.handle_init_event_channel_failures(failed_channels);
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
/// indicating whether persistence is necessary. Only one listener on
- /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
- /// up.
+ /// [`await_persistable_update`], [`await_persistable_update_timeout`], or a future returned by
+ /// [`get_persistable_update_future`] is guaranteed to be woken up.
///
/// Note that this method is not available with the `no-std` feature.
+ ///
+ /// [`await_persistable_update`]: Self::await_persistable_update
+ /// [`await_persistable_update_timeout`]: Self::await_persistable_update_timeout
+ /// [`get_persistable_update_future`]: Self::get_persistable_update_future
#[cfg(any(test, feature = "std"))]
pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
self.persistence_notifier.wait_timeout(max_wait)
}
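// A background-persister sketch built on the timeout variant (`persist` and
// `shutdown_requested` are hypothetical application-supplied helpers; the returned bool
// distinguishes "needs persisting" from a plain timeout):
while !shutdown_requested() {
	if channel_manager.await_persistable_update_timeout(Duration::from_millis(100)) {
		persist(&channel_manager);
	}
}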
/// Blocks until ChannelManager needs to be persisted. Only one listener on
- /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
- /// up.
+ /// [`await_persistable_update`], `await_persistable_update_timeout`, or a future returned by
+ /// [`get_persistable_update_future`] is guaranteed to be woken up.
+ ///
+ /// [`await_persistable_update`]: Self::await_persistable_update
+ /// [`get_persistable_update_future`]: Self::get_persistable_update_future
pub fn await_persistable_update(&self) {
self.persistence_notifier.wait()
}
}
}
-impl<M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
ChannelMessageHandler for ChannelManager<M, T, K, F, L>
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
- let short_to_chan_info = &mut channel_state.short_to_chan_info;
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
channel_state.by_id.retain(|_, chan| {
if chan.get_counterparty_node_id() == *counterparty_node_id {
chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, short_to_chan_info, chan);
+ update_maps_on_chan_removal!(self, chan);
self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
return false;
} else {
(11, outbound_htlc_maximum_msat, option),
});
-impl_writeable_tlv_based!(ChannelDetails, {
- (1, inbound_scid_alias, option),
- (2, channel_id, required),
- (3, channel_type, option),
- (4, counterparty, required),
- (5, outbound_scid_alias, option),
- (6, funding_txo, option),
- (7, config, option),
- (8, short_channel_id, option),
- (10, channel_value_satoshis, required),
- (12, unspendable_punishment_reserve, option),
- (14, user_channel_id, required),
- (16, balance_msat, required),
- (18, outbound_capacity_msat, required),
- // Note that by the time we get past the required read above, outbound_capacity_msat will be
- // filled in, so we can safely unwrap it here.
- (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
- (20, inbound_capacity_msat, required),
- (22, confirmations_required, option),
- (24, force_close_spend_delay, option),
- (26, is_outbound, required),
- (28, is_channel_ready, required),
- (30, is_usable, required),
- (32, is_public, required),
- (33, inbound_htlc_minimum_msat, option),
- (35, inbound_htlc_maximum_msat, option),
-});
+impl Writeable for ChannelDetails {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
+ // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
+ let user_channel_id_low = self.user_channel_id as u64;
+ let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64);
+ write_tlv_fields!(writer, {
+ (1, self.inbound_scid_alias, option),
+ (2, self.channel_id, required),
+ (3, self.channel_type, option),
+ (4, self.counterparty, required),
+ (5, self.outbound_scid_alias, option),
+ (6, self.funding_txo, option),
+ (7, self.config, option),
+ (8, self.short_channel_id, option),
+ (9, self.confirmations, option),
+ (10, self.channel_value_satoshis, required),
+ (12, self.unspendable_punishment_reserve, option),
+ (14, user_channel_id_low, required),
+ (16, self.balance_msat, required),
+ (18, self.outbound_capacity_msat, required),
+ // When writing, a (default_value, ...) field is serialized like a required one; the
+ // expression is only evaluated on read, after outbound_capacity_msat is filled in.
+ (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+ (20, self.inbound_capacity_msat, required),
+ (22, self.confirmations_required, option),
+ (24, self.force_close_spend_delay, option),
+ (26, self.is_outbound, required),
+ (28, self.is_channel_ready, required),
+ (30, self.is_usable, required),
+ (32, self.is_public, required),
+ (33, self.inbound_htlc_minimum_msat, option),
+ (35, self.inbound_htlc_maximum_msat, option),
+ (37, user_channel_id_high_opt, option),
+ });
+ Ok(())
+ }
+}
+
+impl Readable for ChannelDetails {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ init_and_read_tlv_fields!(reader, {
+ (1, inbound_scid_alias, option),
+ (2, channel_id, required),
+ (3, channel_type, option),
+ (4, counterparty, required),
+ (5, outbound_scid_alias, option),
+ (6, funding_txo, option),
+ (7, config, option),
+ (8, short_channel_id, option),
+ (9, confirmations, option),
+ (10, channel_value_satoshis, required),
+ (12, unspendable_punishment_reserve, option),
+ (14, user_channel_id_low, required),
+ (16, balance_msat, required),
+ (18, outbound_capacity_msat, required),
+ // Note that by the time we get past the required read above, outbound_capacity_msat will be
+ // filled in, so we can safely unwrap it here.
+ (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+ (20, inbound_capacity_msat, required),
+ (22, confirmations_required, option),
+ (24, force_close_spend_delay, option),
+ (26, is_outbound, required),
+ (28, is_channel_ready, required),
+ (30, is_usable, required),
+ (32, is_public, required),
+ (33, inbound_htlc_minimum_msat, option),
+ (35, inbound_htlc_maximum_msat, option),
+ (37, user_channel_id_high_opt, option),
+ });
+
+ // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
+ // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
+ let user_channel_id_low: u64 = user_channel_id_low.0.unwrap();
+ let user_channel_id = user_channel_id_low as u128 +
+ ((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);
+
+ Ok(Self {
+ inbound_scid_alias,
+ channel_id: channel_id.0.unwrap(),
+ channel_type,
+ counterparty: counterparty.0.unwrap(),
+ outbound_scid_alias,
+ funding_txo,
+ config,
+ short_channel_id,
+ channel_value_satoshis: channel_value_satoshis.0.unwrap(),
+ unspendable_punishment_reserve,
+ user_channel_id,
+ balance_msat: balance_msat.0.unwrap(),
+ outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
+ next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+ inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
+ confirmations_required,
+ confirmations,
+ force_close_spend_delay,
+ is_outbound: is_outbound.0.unwrap(),
+ is_channel_ready: is_channel_ready.0.unwrap(),
+ is_usable: is_usable.0.unwrap(),
+ is_public: is_public.0.unwrap(),
+ inbound_htlc_minimum_msat,
+ inbound_htlc_maximum_msat,
+ })
+ }
+}
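// Round-trip sketch of the split above: pre-0.0.113 readers ignore the unknown odd TLV
// (type 37) and see only the low half, while new readers reassemble the value losslessly.
let user_channel_id: u128 = 0x0123_4567_89AB_CDEF_0011_2233_4455_6677;
let low = user_channel_id as u64;
let high = (user_channel_id >> 64) as u64;
assert_eq!(low as u128 + ((high as u128) << 64), user_channel_id);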
impl_writeable_tlv_based!(PhantomRouteHints, {
(2, channels, vec_type),
(0, routing, required),
(2, incoming_shared_secret, required),
(4, payment_hash, required),
- (6, amt_to_forward, required),
- (8, outgoing_cltv_value, required)
+ (6, outgoing_amt_msat, required),
+ (8, outgoing_cltv_value, required),
+ (9, incoming_amt_msat, option),
});
(1, payment_id_opt, option),
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
- (4, path, vec_type),
+ (4, *path, vec_type),
(5, payment_params, option),
});
}
},
;);
+impl_writeable_tlv_based!(PendingAddHTLCInfo, {
+ (0, forward_info, required),
+ (1, prev_user_channel_id, (default_value, 0)),
+ (2, prev_short_channel_id, required),
+ (4, prev_htlc_id, required),
+ (6, prev_funding_outpoint, required),
+});
+
impl_writeable_tlv_based_enum!(HTLCForwardInfo,
- (0, AddHTLC) => {
- (0, forward_info, required),
- (2, prev_short_channel_id, required),
- (4, prev_htlc_id, required),
- (6, prev_funding_outpoint, required),
- },
(1, FailHTLC) => {
(0, htlc_id, required),
(2, err_packet, required),
- },
-;);
+ };
+ (0, AddHTLC)
+);
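// In impl_writeable_tlv_based_enum!, variants before the `;` serialize their named fields as
// a TLV stream, while variants after it (here AddHTLC) wrap a single struct and delegate to
// that struct's own encoding. Hoisting the fields into PendingAddHTLCInfo therefore keeps the
// old inline AddHTLC wire format, with the new odd type 1 (prev_user_channel_id) ignored by
// older readers and defaulted to 0 when absent.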
impl_writeable_tlv_based!(PendingInboundPayment, {
(0, payment_secret, required),
(1, Fulfilled) => {
(0, session_privs, required),
(1, payment_hash, option),
+ (3, timer_ticks_without_htlcs, (default_value, 0)),
},
(2, Retryable) => {
(0, session_privs, required),
}
}
- let channel_state = self.channel_state.lock().unwrap();
+ let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+ let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
- (channel_state.claimable_htlcs.len() as u64).write(writer)?;
- for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
+ (claimable_htlcs.len() as u64).write(writer)?;
+ for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
for htlc in previous_hops.iter() {
peer_state.latest_features.write(writer)?;
}
- let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
- let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
let events = self.pending_events.lock().unwrap();
(events.len() as u64).write(writer)?;
for event in events.iter() {
/// which you've already broadcasted the transaction.
///
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
-pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- where M::Target: chain::Watch<Signer>,
+pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// this struct.
///
/// (C-not exported) because we have no HashMap bindings
- pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<Signer>>,
+ pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>,
}
-impl<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
+impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ ChannelManagerReadArgs<'a, M, T, K, F, L>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// HashMap for you. This is primarily useful for C bindings where it is not practical to
/// populate a HashMap directly from C.
pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
- mut channel_monitors: Vec<&'a mut ChannelMonitor<Signer>>) -> Self {
+ mut channel_monitors: Vec<&'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>) -> Self {
Self {
keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config,
channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
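// A deserialization sketch under the new bounds, where the signer type comes from
// K::Target as KeysInterface rather than a separate generic (hypothetical bindings:
// concrete keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, config,
// monitors: Vec<&mut ChannelMonitor<_>>, and a reader over the serialized manager):
let read_args = ChannelManagerReadArgs::new(
	keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, config, monitors,
);
let (block_hash, channel_manager) =
	<(BlockHash, ChannelManager<_, _, _, _, _>)>::read(&mut reader, read_args)?;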
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, L>>)
+ ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, L>>)
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> {
let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<M, T, K, F, L>)
+ ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, ChannelManager<M, T, K, F, L>)
where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
let mut claimable_amt_msat = 0;
+ let mut receiver_node_id = Some(our_network_pubkey);
+ let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
+ if phantom_shared_secret.is_some() {
+ let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode)
+ .expect("Failed to get node_id for phantom node recipient");
+ receiver_node_id = Some(phantom_pubkey)
+ }
for claimable_htlc in claimable_htlcs {
claimable_amt_msat += claimable_htlc.value;
}
}
pending_events_read.push(events::Event::PaymentClaimed {
+ receiver_node_id,
payment_hash,
purpose: payment_purpose,
amount_msat: claimable_amt_msat,
channel_state: Mutex::new(ChannelHolder {
by_id,
- short_to_chan_info,
- claimable_htlcs,
pending_msg_events: Vec::new(),
}),
inbound_payment_key: expanded_inbound_key,
pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
+ claimable_htlcs: Mutex::new(claimable_htlcs),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
id_to_peer: Mutex::new(id_to_peer),
+ short_to_chan_info: FairRwLock::new(short_to_chan_info),
fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
probing_cookie_secret: probing_cookie_secret.unwrap(),
// First, send a partial MPP payment.
let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
+ let mut mpp_route = route.clone();
+ mpp_route.paths.push(mpp_route.paths[0].clone());
+
let payment_id = PaymentId([42; 32]);
// Use the utility function send_payment_along_path to send the payment with MPP data, which
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
- nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ let session_privs = nodes[0].node.add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
+ nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
// Next, send a keysend payment with the same payment_hash and make sure it fails.
- nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Send the second half of the original MPP payment.
- nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ nodes[0].node.send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
None, nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
- nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
None, nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
- let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
// Next, attempt a regular payment and make sure it fails.
let payment_secret = PaymentSecret([43; 32]);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey),
- final_value_msat: 10000,
+ final_value_msat: 10_000,
final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph;
let test_preimage = PaymentPreimage([42; 32]);
let mismatch_payment_hash = PaymentHash([43; 32]);
- let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
+ let session_privs = nodes[0].node.add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap();
+ nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey),
- final_value_msat: 10000,
+ final_value_msat: 10_000,
final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph;
let test_preimage = PaymentPreimage([42; 32]);
let test_secret = PaymentSecret([43; 32]);
let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
- let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
+ let session_privs = nodes[0].node.add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap();
+ nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
route.paths[1][0].short_channel_id = chan_2_id;
route.paths[1][1].short_channel_id = chan_4_id;
- match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
+ match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() {
PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) },
_ => panic!("unexpected error")
use crate::chain::Listen;
use crate::chain::chainmonitor::{ChainMonitor, Persist};
use crate::chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner};
- use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
+ use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{ChannelMessageHandler, Init};
use crate::routing::gossip::NetworkGraph;
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
- $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ $node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);