X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=b8372ad9b10302738882fcc7a35665135cee8e73;hb=e38ab09c3a9514768a9833b2636b2b969f62b3e1;hp=8e279f78a3a6f4b72efc5500ac77dd266bbb5043;hpb=f382f56cbba94480e4839b62c80bb349b0c067f1;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8e279f78..b8372ad9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -46,15 +46,16 @@ use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfi use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; -use crate::routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters}; +use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, RouteParameters}; use crate::ln::msgs; use crate::ln::onion_utils; +use crate::ln::onion_utils::HTLCFailReason; use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT}; use crate::ln::wire::Encode; -use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient}; +use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient}; use crate::util::config::{UserConfig, ChannelConfig}; -use crate::util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; -use crate::util::{byte_utils, events}; +use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; +use crate::util::events; use crate::util::wakers::{Future, Notifier}; use crate::util::scid_utils::fake_scid; use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter}; @@ -66,7 +67,7 @@ use crate::prelude::*; use core::{cmp, mem}; use core::cell::RefCell; use crate::io::Read; -use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard}; +use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; use core::ops::Deref; @@ -92,8 +93,8 @@ use core::ops::Deref; pub(super) enum PendingHTLCRouting { Forward { onion_packet: msgs::OnionPacket, - /// The SCID from the onion that we should forward to. This could be a "real" SCID, an - /// outbound SCID alias, or a phantom node SCID. + /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one + /// generated using `get_fake_scid` from the scid_utils::fake_scid module. 
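A note on the doc change above: a real SCID encodes the funding output's confirmed on-chain position, while fake SCIDs (outbound aliases, phantom-node SCIDs, and, later in this diff, intercept SCIDs from `get_fake_scid`) are random values drawn from reserved namespaces so they cannot collide with a plausibly-confirmed channel. A minimal std-only sketch of the standard 64-bit layout; `scid_from_parts`/`scid_parts` here are illustrative helpers, not the LDK API:

    // Illustrative only: BOLT 7 packs a short_channel_id as
    // 3 bytes block height | 3 bytes tx index | 2 bytes output index.
    fn scid_from_parts(block: u32, tx_index: u32, vout: u16) -> u64 {
        // Assumes block and tx_index each fit in 24 bits.
        ((block as u64) << 40) | ((tx_index as u64) << 16) | (vout as u64)
    }

    fn scid_parts(scid: u64) -> (u32, u32, u16) {
        (((scid >> 40) & 0xff_ffff) as u32,
         ((scid >> 16) & 0xff_ffff) as u32,
         (scid & 0xffff) as u16)
    }

    fn main() {
        let scid = scid_from_parts(654_321, 1_234, 1);
        assert_eq!(scid_parts(scid), (654_321, 1_234, 1));
    }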
short_channel_id: u64, // This should be NonZero eventually when we bump MSRV }, Receive { @@ -112,7 +113,8 @@ pub(super) struct PendingHTLCInfo { pub(super) routing: PendingHTLCRouting, pub(super) incoming_shared_secret: [u8; 32], payment_hash: PaymentHash, - pub(super) amt_to_forward: u64, + pub(super) incoming_amt_msat: Option, // Added in 0.0.113 + pub(super) outgoing_amt_msat: u64, pub(super) outgoing_cltv_value: u32, } @@ -129,20 +131,23 @@ pub(super) enum PendingHTLCStatus { Fail(HTLCFailureMsg), } -pub(super) enum HTLCForwardInfo { - AddHTLC { - forward_info: PendingHTLCInfo, +pub(super) struct PendingAddHTLCInfo { + pub(super) forward_info: PendingHTLCInfo, - // These fields are produced in `forward_htlcs()` and consumed in - // `process_pending_htlc_forwards()` for constructing the - // `HTLCSource::PreviousHopData` for failed and forwarded - // HTLCs. - // - // Note that this may be an outbound SCID alias for the associated channel. - prev_short_channel_id: u64, - prev_htlc_id: u64, - prev_funding_outpoint: OutPoint, - }, + // These fields are produced in `forward_htlcs()` and consumed in + // `process_pending_htlc_forwards()` for constructing the + // `HTLCSource::PreviousHopData` for failed and forwarded + // HTLCs. + // + // Note that this may be an outbound SCID alias for the associated channel. + prev_short_channel_id: u64, + prev_htlc_id: u64, + prev_funding_outpoint: OutPoint, + prev_user_channel_id: u128, +} + +pub(super) enum HTLCForwardInfo { + AddHTLC(PendingAddHTLCInfo), FailHTLC { htlc_id: u64, err_packet: msgs::OnionErrorPacket, @@ -203,6 +208,24 @@ impl Readable for PaymentId { Ok(PaymentId(buf)) } } + +/// An identifier used to uniquely identify an intercepted HTLC to LDK. +/// (C-not exported) as we just use [u8; 32] directly +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct InterceptId(pub [u8; 32]); + +impl Writeable for InterceptId { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + self.0.write(w) + } +} + +impl Readable for InterceptId { + fn read(r: &mut R) -> Result { + let buf: [u8; 32] = Readable::read(r)?; + Ok(InterceptId(buf)) + } +} /// Tracks the inbound corresponding to an outbound HTLC #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash #[derive(Clone, PartialEq, Eq)] @@ -254,31 +277,12 @@ impl HTLCSource { } } -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug -pub(super) enum HTLCFailReason { - LightningError { - err: msgs::OnionErrorPacket, - }, - Reason { - failure_code: u16, - data: Vec, - } -} - struct ReceiveError { err_code: u16, err_data: Vec, msg: &'static str, } -/// Return value for claim_funds_from_hop -enum ClaimFundsFromHop { - PrevHopForceClosed, - MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option), - Success(u64), - DuplicateClaim, -} - type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>); /// Error type returned across the channel_state mutex boundary. 
When an Err is generated for a @@ -289,7 +293,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource struct MsgHandleErrInternal { err: msgs::LightningError, - chan_id: Option<([u8; 32], u64)>, // If Some a channel of ours has been closed + chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { @@ -325,7 +329,7 @@ impl MsgHandleErrInternal { Self { err, chan_id: None, shutdown_finish: None } } #[inline] - fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option) -> Self { + fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option) -> Self { Self { err: LightningError { err: err.clone(), @@ -392,22 +396,39 @@ pub(super) enum RAACommitmentOrder { RevokeAndACKFirst, } -// Note this is only exposed in cfg(test): -pub(super) struct ChannelHolder { - pub(super) by_id: HashMap<[u8; 32], Channel>, - /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. - /// - /// Outbound SCID aliases are added here once the channel is available for normal use, with - /// SCIDs being added once the funding transaction is confirmed at the channel's required - /// confirmation depth. - pub(super) short_to_chan_info: HashMap, +/// Information about a payment which is currently being claimed. +struct ClaimingPayment { + amount_msat: u64, + payment_purpose: events::PaymentPurpose, + receiver_node_id: PublicKey, +} +impl_writeable_tlv_based!(ClaimingPayment, { + (0, amount_msat, required), + (2, payment_purpose, required), + (4, receiver_node_id, required), +}); + +/// Information about claimable or being-claimed payments +struct ClaimablePayments { /// Map from payment hash to the payment data and any HTLCs which are to us and can be /// failed/claimed by the user. /// - /// Note that while this is held in the same mutex as the channels themselves, no consistency - /// guarantees are made about the channels given here actually existing anymore by the time you - /// go to read them! + /// Note that, no consistency guarantees are made about the channels given here actually + /// existing anymore by the time you go to read them! + /// + /// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure + /// we don't get a duplicate payment. claimable_htlcs: HashMap)>, + + /// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which + /// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user + /// as an [`events::Event::PaymentClaimed`]. + pending_claiming_payments: HashMap, +} + +// Note this is only exposed in cfg(test): +pub(super) struct ChannelHolder { + pub(super) by_id: HashMap<[u8; 32], Channel>, /// Messages to send to peers - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). pub(super) pending_msg_events: Vec, @@ -422,6 +443,16 @@ enum BackgroundEvent { ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)), } +pub(crate) enum MonitorUpdateCompletionAction { + /// Indicates that a payment ultimately destined for us was claimed and we should emit an + /// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for + /// this payment. Note that this is only best-effort. 
On restart it's possible such a duplicate + /// event can be generated. + PaymentClaimed { payment_hash: PaymentHash }, + /// Indicates an [`events::Event`] should be surfaced to the user. + EmitEvent { event: events::Event }, +} + /// State we hold per-peer. In the future we should put channels in here, but for now we only hold /// the latest Init features we heard from the peer. struct PeerState { @@ -473,6 +504,7 @@ pub(crate) enum PendingOutboundPayment { Fulfilled { session_privs: HashSet<[u8; 32]>, payment_hash: Option, + timer_ticks_without_htlcs: u8, }, /// When a payer gives up trying to retry a payment, they inform us, letting us generate a /// `PaymentFailed` event when all HTLCs have irrevocably failed. This avoids a number of race @@ -488,12 +520,6 @@ pub(crate) enum PendingOutboundPayment { } impl PendingOutboundPayment { - fn is_retryable(&self) -> bool { - match self { - PendingOutboundPayment::Retryable { .. } => true, - _ => false, - } - } fn is_fulfilled(&self) -> bool { match self { PendingOutboundPayment::Fulfilled { .. } => true, @@ -532,7 +558,7 @@ impl PendingOutboundPayment { => session_privs, }); let payment_hash = self.payment_hash(); - *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash }; + *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash, timer_ticks_without_htlcs: 0 }; } fn mark_abandoned(&mut self) -> Result<(), ()> { @@ -617,7 +643,7 @@ impl PendingOutboundPayment { /// concrete type of the KeysManager. /// /// (C-not exported) as Arcs don't make sense in bindings -pub type SimpleArcChannelManager = ChannelManager, Arc, Arc, Arc, Arc>; +pub type SimpleArcChannelManager = ChannelManager, Arc, Arc, Arc, Arc>; /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference /// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't @@ -629,7 +655,7 @@ pub type SimpleArcChannelManager = ChannelManager = ChannelManager; +pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e L>; /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. 
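The next hunk rewrites the lock-order tree in `ChannelManager`'s docs. The invariant it documents is the usual deadlock-avoidance rule: whenever a thread holds two of these locks at once, it must have acquired them in the nesting order shown, top to bottom. A tiny std-only illustration of the rule, using two placeholder fields named after locks from the tree (this is a sketch of the discipline, not the real struct):

    use std::sync::Mutex;

    // Per the documented nesting below, `claimable_payments` may only be
    // locked while `pending_inbound_payments` is already held (or while
    // neither is held), never the reverse.
    struct Manager {
        pending_inbound_payments: Mutex<Vec<u64>>,
        claimable_payments: Mutex<Vec<u64>>,
    }

    impl Manager {
        fn with_both(&self) -> usize {
            // Acquire strictly in the documented order; if another code path
            // took these in the opposite order, two threads could deadlock.
            let inbound = self.pending_inbound_payments.lock().unwrap();
            let claimable = self.claimable_payments.lock().unwrap();
            inbound.len() + claimable.len()
        }
    }

    fn main() {
        let m = Manager {
            pending_inbound_payments: Mutex::new(vec![]),
            claimable_payments: Mutex::new(vec![]),
        };
        assert_eq!(m.with_both(), 0);
    }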
@@ -680,18 +706,24 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage // `total_consistency_lock` // | // |__`forward_htlcs` +// | | +// | |__`pending_intercepted_htlcs` // | -// |__`channel_state` +// |__`pending_inbound_payments` // | | -// | |__`id_to_peer` +// | |__`claimable_payments` // | | -// | |__`per_peer_state` +// | |__`pending_outbound_payments` // | | -// | |__`outbound_scid_aliases` -// | | -// | |__`pending_inbound_payments` +// | |__`channel_state` +// | | +// | |__`id_to_peer` // | | -// | |__`pending_outbound_payments` +// | |__`short_to_chan_info` +// | | +// | |__`per_peer_state` +// | | +// | |__`outbound_scid_aliases` // | | // | |__`best_block` // | | @@ -699,10 +731,10 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage // | | // | |__`pending_background_events` // -pub struct ChannelManager - where M::Target: chain::Watch, +pub struct ChannelManager + where M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -721,14 +753,14 @@ pub struct ChannelManager>, + pub(super) channel_state: Mutex::Signer>>, #[cfg(not(any(test, feature = "_test_utils")))] - channel_state: Mutex>, + channel_state: Mutex::Signer>>, /// Storage for PaymentSecrets and any requirements on future inbound payments before we will - /// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements + /// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed - /// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out. + /// after we generate a PaymentClaimable upon receipt of all MPP parts or when they time out. /// /// See `ChannelManager` struct-level documentation for lock order requirements. pending_inbound_payments: Mutex>, @@ -761,6 +793,17 @@ pub struct ChannelManager>>, #[cfg(not(test))] forward_htlcs: Mutex>>, + /// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here + /// until the user tells us what we should do with them. + /// + /// See `ChannelManager` struct-level documentation for lock order requirements. + pending_intercepted_htlcs: Mutex>, + + /// The sets of payments which are claimable or currently being claimed. See + /// [`ClaimablePayments`]' individual field docs for more info. + /// + /// See `ChannelManager` struct-level documentation for lock order requirements. + claimable_payments: Mutex, /// The set of outbound SCID aliases across all our channels, including unconfirmed channels /// and some closed channels which reached a usable state prior to being closed. This is used @@ -791,6 +834,22 @@ pub struct ChannelManager>, + /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. + /// + /// Outbound SCID aliases are added here once the channel is available for normal use, with + /// SCIDs being added once the funding transaction is confirmed at the channel's required + /// confirmation depth. + /// + /// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency + /// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a + /// channel with the `channel_id` in our other maps. + /// + /// See `ChannelManager` struct-level documentation for lock order requirements. 
+ #[cfg(test)] + pub(super) short_to_chan_info: FairRwLock>, + #[cfg(not(test))] + short_to_chan_info: FairRwLock>, + our_network_key: SecretKey, our_network_pubkey: PublicKey, @@ -959,13 +1018,14 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; -/// The number of blocks before we consider an outbound payment for expiry if it doesn't have any -/// pending HTLCs in flight. -pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3; - /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3; +/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the +/// idempotency of payments by [`PaymentId`]. See +/// [`ChannelManager::remove_stale_resolved_payments`]. +pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7; + /// Information needed for constructing an invoice route hint for this channel. #[derive(Clone, Debug, PartialEq)] pub struct CounterpartyForwardingInfo { @@ -1075,8 +1135,10 @@ pub struct ChannelDetails { /// /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat pub unspendable_punishment_reserve: Option, - /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound. - pub user_channel_id: u64, + /// The `user_channel_id` passed in to create_channel, or a random value if the channel was + /// inbound. This may be zero for inbound channels serialized with LDK versions prior to + /// 0.0.113. + pub user_channel_id: u128, /// Our total balance. This is the amount we would get if we close the channel. /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this /// amount is not likely to be recoverable on close. @@ -1128,6 +1190,10 @@ pub struct ChannelDetails { /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth pub confirmations_required: Option, + /// The current number of confirmations on the funding transaction. + /// + /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113. + pub confirmations: Option, /// The number of blocks (after our commitment transaction confirms) that we will need to wait /// until we can claim our funds after we force-close the channel. During this time our /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty @@ -1192,21 +1258,40 @@ impl ChannelDetails { #[derive(Clone, Debug)] pub enum PaymentSendFailure { /// A parameter which was passed to send_payment was invalid, preventing us from attempting to - /// send the payment at all. No channel state has been changed or messages sent to peers, and - /// once you've changed the parameter at error, you can freely retry the payment in full. + /// send the payment at all. + /// + /// You can freely resend the payment in full (with the parameter error fixed). + /// + /// Because the payment failed outright, no payment tracking is done, you do not need to call + /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work + /// for this payment. 
ParameterError(APIError), /// A parameter in a single path which was passed to send_payment was invalid, preventing us - /// from attempting to send the payment at all. No channel state has been changed or messages - /// sent to peers, and once you've changed the parameter at error, you can freely retry the - /// payment in full. + /// from attempting to send the payment at all. + /// + /// You can freely resend the payment in full (with the parameter error fixed). /// /// The results here are ordered the same as the paths in the route object which was passed to /// send_payment. + /// + /// Because the payment failed outright, no payment tracking is done, you do not need to call + /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work + /// for this payment. PathParameterError(Vec>), /// All paths which were attempted failed to send, with no channel state change taking place. - /// You can freely retry the payment in full (though you probably want to do so over different + /// You can freely resend the payment in full (though you probably want to do so over different /// paths than the ones selected). - AllFailedRetrySafe(Vec), + /// + /// Because the payment failed outright, no payment tracking is done, you do not need to call + /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work + /// for this payment. + AllFailedResendSafe(Vec), + /// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not + /// yet completed (i.e. generated an [`Event::PaymentSent`]) or been abandoned (via + /// [`ChannelManager::abandon_payment`]). + /// + /// [`Event::PaymentSent`]: events::Event::PaymentSent + DuplicatePayment, /// Some paths which were attempted failed to send, though possibly not all. At least some /// paths have irrevocably committed to the HTLC and retrying the payment in full would result /// in over-/re-payment. @@ -1296,9 +1381,11 @@ macro_rules! handle_error { } macro_rules! update_maps_on_chan_removal { - ($self: expr, $short_to_chan_info: expr, $channel: expr) => { + ($self: expr, $channel: expr) => {{ + $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id()); + let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); if let Some(short_id) = $channel.get_short_channel_id() { - $short_to_chan_info.remove(&short_id); + short_to_chan_info.remove(&short_id); } else { // If the channel was never confirmed on-chain prior to its closure, remove the // outbound SCID alias we used for it from the collision-prevention set. While we @@ -1309,14 +1396,13 @@ macro_rules! update_maps_on_chan_removal { let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias()); debug_assert!(alias_removed); } - $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id()); - $short_to_chan_info.remove(&$channel.outbound_scid_alias()); - } + short_to_chan_info.remove(&$channel.outbound_scid_alias()); + }} } /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error) macro_rules! convert_chan_err { - ($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => { + ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => { match $err { ChannelError::Warn(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone())) @@ -1326,7 +1412,7 @@ macro_rules! 
convert_chan_err { }, ChannelError::Close(msg) => { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); - update_maps_on_chan_removal!($self, $short_to_chan_info, $channel); + update_maps_on_chan_removal!($self, $channel); let shutdown_res = $channel.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) @@ -1336,11 +1422,11 @@ macro_rules! convert_chan_err { } macro_rules! break_chan_entry { - ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + ($self: ident, $res: expr, $entry: expr) => { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key()); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key()); if drop { $entry.remove_entry(); } @@ -1351,11 +1437,11 @@ macro_rules! break_chan_entry { } macro_rules! try_chan_entry { - ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + ($self: ident, $res: expr, $entry: expr) => { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key()); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key()); if drop { $entry.remove_entry(); } @@ -1366,21 +1452,21 @@ macro_rules! try_chan_entry { } macro_rules! remove_channel { - ($self: expr, $channel_state: expr, $entry: expr) => { + ($self: expr, $entry: expr) => { { let channel = $entry.remove_entry().1; - update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel); + update_maps_on_chan_removal!($self, channel); channel } } } macro_rules! handle_monitor_update_res { - ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { + ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { match $err { ChannelMonitorUpdateStatus::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..])); - update_maps_on_chan_removal!($self, $short_to_chan_info, $chan); + update_maps_on_chan_removal!($self, $chan); // TODO: $failed_fails is dropped here, which will cause other channels to hit the // chain in a confused state! We need to move them into the ChannelMonitor which // will be responsible for failing backwards once things confirm on-chain. @@ -1422,180 +1508,72 @@ macro_rules! 
handle_monitor_update_res { }, } }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { - let (res, drop) = handle_monitor_update_res!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); + ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { + let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); if drop { $entry.remove_entry(); } res } }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { { + ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { { debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst); - handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) + handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) } }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => { - handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) + ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => { + handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => { - handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new()) + ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => { + handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new()) }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { - handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new()) + ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { + handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new()) }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { - handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, 
$failed_forwards, $failed_fails, Vec::new()) + ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { + handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new()) }; } macro_rules! send_channel_ready { - ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => { + ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { node_id: $channel.get_counterparty_node_id(), msg: $channel_ready_msg, }); // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. - let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); + let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); + let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); if let Some(real_scid) = $channel.get_short_channel_id() { - let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); + let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); } - } + }} } -macro_rules! handle_chan_restoration_locked { - ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr, - $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr, - $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { { - let mut htlc_forwards = None; - - let chanmon_update: Option = $chanmon_update; // Force type-checking to resolve - let chanmon_update_is_none = chanmon_update.is_none(); - let counterparty_node_id = $channel_entry.get().get_counterparty_node_id(); - let res = loop { - let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve - if !forwards.is_empty() { - htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()), - $channel_entry.get().get_funding_txo().unwrap(), forwards)); - } - - if chanmon_update.is_some() { - // On reconnect, we, by definition, only resend a channel_ready if there have been - // no commitment updates, so the only channel monitor update which could also be - // associated with a channel_ready would be the funding_created/funding_signed - // monitor update. That monitor update failing implies that we won't send - // channel_ready until it's been updated, so we can't have a channel_ready and a - // monitor update here (so we don't bother to handle it correctly below). 
- assert!($channel_ready.is_none()); - // A channel monitor update makes no sense without either a channel_ready or a - // commitment update to process after it. Since we can't have a channel_ready, we - // only bother to handle the monitor-update + commitment_update case below. - assert!($commitment_update.is_some()); - } - - if let Some(msg) = $channel_ready { - // Similar to the above, this implies that we're letting the channel_ready fly - // before it should be allowed to. - assert!(chanmon_update.is_none()); - send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg); - } - if let Some(msg) = $announcement_sigs { - $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: counterparty_node_id, - msg, +macro_rules! emit_channel_ready_event { + ($self: expr, $channel: expr) => { + if $channel.should_emit_channel_ready_event() { + { + let mut pending_events = $self.pending_events.lock().unwrap(); + pending_events.push(events::Event::ChannelReady { + channel_id: $channel.channel_id(), + user_channel_id: $channel.get_user_id(), + counterparty_node_id: $channel.get_counterparty_node_id(), + channel_type: $channel.get_channel_type().clone(), }); } - - let funding_broadcastable: Option = $funding_broadcastable; // Force type-checking to resolve - if let Some(monitor_update) = chanmon_update { - // We only ever broadcast a funding transaction in response to a funding_signed - // message and the resulting monitor update. Thus, on channel_reestablish - // message handling we can't have a funding transaction to broadcast. When - // processing a monitor update finishing resulting in a funding broadcast, we - // cannot have a second monitor update, thus this case would indicate a bug. - assert!(funding_broadcastable.is_none()); - // Given we were just reconnected or finished updating a channel monitor, the - // only case where we can get a new ChannelMonitorUpdate would be if we also - // have some commitment updates to send as well. - assert!($commitment_update.is_some()); - match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) { - ChannelMonitorUpdateStatus::Completed => {}, - e => { - // channel_reestablish doesn't guarantee the order it returns is sensical - // for the messages it returns, but if we're setting what messages to - // re-transmit on monitor update success, we need to make sure it is sane. - let mut order = $order; - if $raa.is_none() { - order = RAACommitmentOrder::CommitmentFirst; - } - break handle_monitor_update_res!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true); - } - } - } - - macro_rules! handle_cs { () => { - if let Some(update) = $commitment_update { - $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: counterparty_node_id, - updates: update, - }); - } - } } - macro_rules! 
handle_raa { () => { - if let Some(revoke_and_ack) = $raa { - $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { - node_id: counterparty_node_id, - msg: revoke_and_ack, - }); - } - } } - match $order { - RAACommitmentOrder::CommitmentFirst => { - handle_cs!(); - handle_raa!(); - }, - RAACommitmentOrder::RevokeAndACKFirst => { - handle_raa!(); - handle_cs!(); - }, - } - if let Some(tx) = funding_broadcastable { - log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid()); - $self.tx_broadcaster.broadcast_transaction(&tx); - } - break Ok(()); - }; - - if chanmon_update_is_none { - // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop - // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which - // should *never* end up calling back to `chain_monitor.update_channel()`. - assert!(res.is_ok()); - } - - (htlc_forwards, res, counterparty_node_id) - } } -} - -macro_rules! post_handle_chan_restoration { - ($self: ident, $locked_res: expr) => { { - let (htlc_forwards, res, counterparty_node_id) = $locked_res; - - let _ = handle_error!($self, res, counterparty_node_id); - - if let Some(forwards) = htlc_forwards { - $self.forward_htlcs(&mut [forwards][..]); + $channel.set_channel_ready_event_emitted(); } - } } + } } -impl ChannelManager - where M::Target: chain::Watch, +impl ChannelManager + where M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -1625,15 +1603,16 @@ impl ChannelMana channel_state: Mutex::new(ChannelHolder{ by_id: HashMap::new(), - short_to_chan_info: HashMap::new(), - claimable_htlcs: HashMap::new(), pending_msg_events: Vec::new(), }), outbound_scid_aliases: Mutex::new(HashSet::new()), pending_inbound_payments: Mutex::new(HashMap::new()), pending_outbound_payments: Mutex::new(HashMap::new()), forward_htlcs: Mutex::new(HashMap::new()), + claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs: HashMap::new(), pending_claiming_payments: HashMap::new() }), + pending_intercepted_htlcs: Mutex::new(HashMap::new()), id_to_peer: Mutex::new(HashMap::new()), + short_to_chan_info: FairRwLock::new(HashMap::new()), our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()), @@ -1687,10 +1666,9 @@ impl ChannelMana /// /// `user_channel_id` will be provided back as in /// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events - /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0 - /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here. - /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise - /// ignored. + /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to a + /// randomized value for inbound channels. `user_channel_id` has no meaning inside of LDK, it + /// is simply copied to events and otherwise ignored. /// /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`. 
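Note that `user_channel_id` is widened from `u64` to `u128` throughout this diff, with inbound channels now receiving a randomized value rather than 0. A trivial sketch of deriving such an id from 16 bytes of entropy; the byte source is a stand-in, not LDK's `KeysInterface`:

    // Hypothetical helper: build a u128 user_channel_id from random bytes.
    fn user_channel_id_from_entropy(bytes: [u8; 16]) -> u128 {
        u128::from_be_bytes(bytes)
    }

    fn main() {
        let id = user_channel_id_from_entropy([0x11; 16]);
        // 0 no longer reliably means "inbound channel" after this change.
        assert_ne!(id, 0);
    }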
@@ -1709,7 +1687,7 @@ impl ChannelMana /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id - pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option) -> Result<[u8; 32], APIError> { + pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option) -> Result<[u8; 32], APIError> { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } @@ -1761,10 +1739,11 @@ impl ChannelMana Ok(temporary_channel_id) } - fn list_channels_with_filter)) -> bool>(&self, f: Fn) -> Vec { + fn list_channels_with_filter::Signer>)) -> bool>(&self, f: Fn) -> Vec { let mut res = Vec::new(); { let channel_state = self.channel_state.lock().unwrap(); + let best_block_height = self.best_block.read().unwrap().height(); res.reserve(channel_state.by_id.len()); for (channel_id, channel) in channel_state.by_id.iter().filter(f) { let balance = channel.get_available_balances(); @@ -1801,6 +1780,7 @@ impl ChannelMana next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, user_channel_id: channel.get_user_id(), confirmations_required: channel.minimum_depth(), + confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), is_outbound: channel.is_outbound(), is_channel_ready: channel.is_usable(), @@ -1843,7 +1823,7 @@ impl ChannelMana } /// Helper function that issues the channel close events - fn issue_channel_close_events(&self, channel: &Channel, closure_reason: ClosureReason) { + fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { let mut pending_events_lock = self.pending_events.lock().unwrap(); match channel.unbroadcasted_funding() { Some(transaction) => { @@ -1870,14 +1850,16 @@ impl ChannelMana if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){ return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); } - let per_peer_state = self.per_peer_state.read().unwrap(); - let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) { - Some(peer_state) => { - let peer_state = peer_state.lock().unwrap(); - let their_features = &peer_state.latest_features; - chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? - }, - None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), + let (shutdown_msg, monitor_update, htlcs) = { + let per_peer_state = self.per_peer_state.read().unwrap(); + match per_peer_state.get(&counterparty_node_id) { + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? 
+ }, + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), + } }; failed_htlcs = htlcs; @@ -1885,9 +1867,9 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); let (result, is_permanent) = - handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { - remove_channel!(self, channel_state, chan_entry); + remove_channel!(self, chan_entry); break result; } } @@ -1898,7 +1880,7 @@ impl ChannelMana }); if chan_entry.get().is_shutdown() { - let channel = remove_channel!(self, channel_state, chan_entry); + let channel = remove_channel!(self, chan_entry); if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: channel_update @@ -1913,8 +1895,9 @@ impl ChannelMana }; for htlc_source in failed_htlcs.drain(..) { + let reason = HTLCFailReason::from_failure_code(0x4000 | 8); let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; - self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -1971,8 +1954,9 @@ impl ChannelMana log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; + let reason = HTLCFailReason::from_failure_code(0x4000 | 8); let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; - self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } if let Some((funding_txo, monitor_update)) = monitor_update_option { // There isn't anything we can do if we get an update failure - we're already @@ -1999,7 +1983,7 @@ impl ChannelMana } else { self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); } - remove_channel!(self, channel_state, chan) + remove_channel!(self, chan) } else { return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); } @@ -2078,7 +2062,7 @@ impl ChannelMana return Err(ReceiveError { msg: "Upstream node set CLTV to the wrong value", err_code: 18, - err_data: byte_utils::be32_to_array(cltv_expiry).to_vec() + err_data: cltv_expiry.to_be_bytes().to_vec() }) } // final_expiry_too_soon @@ -2087,29 +2071,25 @@ impl ChannelMana // Also, ensure that, in the case of an unknown preimage for the received payment hash, our // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). 
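In the hunk below, the final-CLTV check switches from bare error code 17 to `0x4000 | 15` (`incorrect_or_unknown_payment_details`) and packs the HTLC amount plus the current block height into the failure data, matching BOLT 4's layout for that code. A std-only sketch of the 12-byte payload built by the added lines:

    // incorrect_or_unknown_payment_details failure data:
    // 8-byte big-endian htlc_msat followed by 4-byte big-endian block height.
    fn htlc_fail_data(amt_msat: u64, current_height: u32) -> Vec<u8> {
        let mut err_data = Vec::with_capacity(12);
        err_data.extend_from_slice(&amt_msat.to_be_bytes());
        err_data.extend_from_slice(&current_height.to_be_bytes());
        err_data
    }

    fn main() {
        assert_eq!(htlc_fail_data(1_000, 750_000).len(), 12);
    }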
- if (hop_data.outgoing_cltv_value as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + let current_height: u32 = self.best_block.read().unwrap().height(); + if (hop_data.outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + let mut err_data = Vec::with_capacity(12); + err_data.extend_from_slice(&amt_msat.to_be_bytes()); + err_data.extend_from_slice(¤t_height.to_be_bytes()); return Err(ReceiveError { - err_code: 17, - err_data: Vec::new(), + err_code: 0x4000 | 15, err_data, msg: "The final CLTV expiry is too soon to handle", }); } if hop_data.amt_to_forward > amt_msat { return Err(ReceiveError { err_code: 19, - err_data: byte_utils::be64_to_array(amt_msat).to_vec(), + err_data: amt_msat.to_be_bytes().to_vec(), msg: "Upstream node sent less than we were supposed to receive in payment", }); } let routing = match hop_data.format { - msgs::OnionHopDataFormat::Legacy { .. } => { - return Err(ReceiveError { - err_code: 0x4000|0x2000|3, - err_data: Vec::new(), - msg: "We require payment_secrets", - }); - }, msgs::OnionHopDataFormat::NonFinalNode { .. } => { return Err(ReceiveError { err_code: 0x4000|22, @@ -2162,7 +2142,8 @@ impl ChannelMana routing, payment_hash, incoming_shared_secret: shared_secret, - amt_to_forward: amt_msat, + incoming_amt_msat: Some(amt_msat), + outgoing_amt_msat: amt_msat, outgoing_cltv_value: hop_data.outgoing_cltv_value, }) } @@ -2204,7 +2185,8 @@ impl ChannelMana return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, - reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data), + reason: HTLCFailReason::reason($err_code, $data.to_vec()) + .get_encrypted_failure_packet(&shared_secret, &None), })); } } @@ -2244,7 +2226,6 @@ impl ChannelMana }; let short_channel_id = match next_hop_data.format { - msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, msgs::OnionHopDataFormat::FinalNode { .. } => { return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); @@ -2258,25 +2239,29 @@ impl ChannelMana }, payment_hash: msg.payment_hash.clone(), incoming_shared_secret: shared_secret, - amt_to_forward: next_hop_data.amt_to_forward, + incoming_amt_msat: Some(msg.amount_msat), + outgoing_amt_msat: next_hop_data.amt_to_forward, outgoing_cltv_value: next_hop_data.outgoing_cltv_value, }) } }; - if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { + if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info { // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel // with a short_channel_id of 0. This is important as various things later assume // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. 
} = routing { - if let Some((err, code, chan_update)) = loop { + if let Some((err, mut code, chan_update)) = loop { + let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned(); let mut channel_state = self.channel_state.lock().unwrap(); - let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned(); let forwarding_id_opt = match id_option { None => { // unknown_next_peer // Note that this is likely a timing oracle for detecting whether an scid is a - // phantom. - if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id) { + // phantom or an intercept. + if (self.default_configuration.accept_intercept_htlcs && + fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) || + fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) + { None } else { break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); @@ -2285,7 +2270,14 @@ impl ChannelMana Some((_cp_id, chan_id)) => Some(chan_id.clone()), }; let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt { - let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap(); + let chan = match channel_state.by_id.get_mut(&forwarding_id) { + None => { + // Channel was removed. The short_to_chan_info and by_id maps have + // no consistency guarantees. + break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + }, + Some(chan) => chan + }; if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if @@ -2308,18 +2300,21 @@ impl ChannelMana if !chan.is_live() { // channel_disabled break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, chan_update_opt)); } - if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum + if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt)); } - if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *amt_to_forward, *outgoing_cltv_value) { + if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) { break Some((err, code, chan_update_opt)); } chan_update_opt } else { - if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry + if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { + // We really should set `incorrect_cltv_expiry` here but as we're not + // forwarding over a real channel we can't generate a channel_update + // for it. Instead we just return a generic temporary_node_failure. break Some(( "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", - 0x1000 | 13, None, + 0x2000 | 2, None, )); } None @@ -2365,6 +2360,12 @@ impl ChannelMana (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail"); msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail"); chan_update.write(&mut res).expect("Writes cannot fail"); + } else if code & 0x1000 == 0x1000 { + // If we're trying to return an error that requires a `channel_update` but + // we're forwarding to a phantom or intercept "channel" (i.e. 
cannot + // generate an update), just use the generic "temporary_node_failure" + // instead. + code = 0x2000 | 2; } return_err!(err, code, &res.0[..]); } @@ -2379,7 +2380,7 @@ impl ChannelMana /// [`MessageSendEvent::BroadcastChannelUpdate`] event. /// /// May be called with channel_state already locked! - fn get_channel_update_for_broadcast(&self, chan: &Channel) -> Result { + fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { if !chan.should_announce() { return Err(LightningError { err: "Cannot broadcast a channel_update for a private channel".to_owned(), @@ -2398,7 +2399,7 @@ impl ChannelMana /// and thus MUST NOT be called unless the recipient of the resulting message has already /// provided evidence that they know about the existence of the channel. /// May be called with channel_state already locked! - fn get_channel_update_for_unicast(&self, chan: &Channel) -> Result { + fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result { log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), @@ -2407,7 +2408,7 @@ impl ChannelMana self.get_channel_update_for_onion(short_channel_id, chan) } - fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel) -> Result { + fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result { log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id())); let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] 
< chan.get_counterparty_node_id().serialize()[..]; @@ -2434,60 +2435,33 @@ impl ChannelMana } // Only public for testing, this should otherwise never be called direcly - pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option) -> Result<(), APIError> { + pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_params: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.keys_manager.get_secure_random_bytes(); - let session_priv_bytes = self.keys_manager.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) - .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?; + .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected"})?; let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?; if onion_utils::route_size_insane(&onion_payloads) { - return Err(APIError::RouteError{err: "Route size too large considering onion data"}); + return Err(APIError::InvalidRoute{err: "Route size too large considering onion data"}); } let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let err: Result<(), _> = loop { - let mut channel_lock = self.channel_state.lock().unwrap(); - - let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); - let payment_entry = pending_outbounds.entry(payment_id); - if let hash_map::Entry::Occupied(payment) = &payment_entry { - if !payment.get().is_retryable() { - return Err(APIError::RouteError { - err: "Payment already completed" - }); - } - } - - let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) { + let id = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}), Some((_cp_id, chan_id)) => chan_id.clone(), }; - macro_rules! 
insert_outbound_payment { - () => { - let payment = payment_entry.or_insert_with(|| PendingOutboundPayment::Retryable { - session_privs: HashSet::new(), - pending_amt_msat: 0, - pending_fee_msat: Some(0), - payment_hash: *payment_hash, - payment_secret: *payment_secret, - starting_block_height: self.best_block.read().unwrap().height(), - total_msat: total_value, - }); - assert!(payment.insert(session_priv_bytes, path)); - } - } - + let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { match { if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey { - return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); + return Err(APIError::InvalidRoute{err: "Node ID mismatch on first hop!"}); } if !chan.get().is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); @@ -2501,19 +2475,17 @@ impl ChannelMana payment_secret: payment_secret.clone(), payment_params: payment_params.clone(), }, onion_packet, &self.logger), - channel_state, chan) + chan) } { Some((update_add, commitment_signed, monitor_update)) => { let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); let chan_id = chan.get().channel_id(); match (update_err, - handle_monitor_update_res!(self, update_err, channel_state, chan, + handle_monitor_update_res!(self, update_err, chan, RAACommitmentOrder::CommitmentFirst, false, true)) { (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e), - (ChannelMonitorUpdateStatus::Completed, Ok(())) => { - insert_outbound_payment!(); - }, + (ChannelMonitorUpdateStatus::Completed, Ok(())) => {}, (ChannelMonitorUpdateStatus::InProgress, Err(_)) => { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor @@ -2521,7 +2493,6 @@ impl ChannelMana // indicating that it is unsafe to retry the payment wholesale, // which we do in the send_payment check for // MonitorUpdateInProgress, below. - insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above. return Err(APIError::MonitorUpdateInProgress); }, _ => unreachable!(), @@ -2540,9 +2511,14 @@ impl ChannelMana }, }); }, - None => { insert_outbound_payment!(); }, + None => { }, } - } else { unreachable!(); } + } else { + // The channel was likely removed after we fetched the id from the + // `short_to_chan_info` map, but before we successfully locked the `by_id` map. + // This can occur as no consistency guarantees exists between the two maps. + return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}); + } return Ok(()); }; @@ -2559,21 +2535,27 @@ impl ChannelMana /// Value parameters are provided via the last hop in route, see documentation for RouteHop /// fields for more info. /// - /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative - /// payment), we don't do anything to stop you! We always try to ensure that if the provided - /// next hop knows the preimage to payment_hash they can claim an additional amount as - /// specified in the last hop in the route! Thus, you should probably do your own - /// payment_preimage tracking (which you should already be doing as they represent "proof of - /// payment") and prevent double-sends yourself. 
+	/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
+	/// method will error with a [`PaymentSendFailure::DuplicatePayment`]. Note, however, that once
+	/// a payment is no longer pending (either via [`ChannelManager::abandon_payment`], or handling
+	/// of an [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the
+	/// same [`PaymentId`].
 	///
-	/// May generate SendHTLCs message(s) event on success, which should be relayed.
+	/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
+	/// tracking of payments, including state to indicate once a payment has completed. Because you
+	/// should also ensure that [`PaymentHash`]es are not re-used, for simplicity you may wish to
+	/// use the [`PaymentHash`] as the key for tracking payments. In that case, the
+	/// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
+	///
+	/// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via
+	/// [`PeerManager::process_events`]).
 	///
 	/// Each path may have a different return value, and [`PaymentSendFailure`] may contain a Vec with
 	/// each entry matching the corresponding-index entry in the route paths, see
 	/// PaymentSendFailure for more info.
 	///
 	/// In general, a path may raise:
-	///  * [`APIError::RouteError`] when an invalid route or forwarding parameter (cltv_delta, fee,
+	///  * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
 	///    node public key) is specified.
 	///  * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
 	///    (including due to previous monitor update failure or new permanent monitor update
 	///
@@ -2590,16 +2572,55 @@ impl ChannelMana
 	/// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route
 	/// must not contain multiple paths as multi-path payments require a recipient-provided
 	/// payment_secret.
+	///
 	/// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
 	/// bit set (either as required or as available). If multiple paths are present in the Route,
 	/// we assume the invoice had the basic_mpp feature set.
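As a concrete illustration of the idempotency contract described in these docs, a caller might wire `send_payment` up as follows. This is a minimal sketch and not part of the patch: `MyChannelManager` stands in for your project's concrete `ChannelManager` type alias; only `PaymentId`, `send_payment` and `PaymentSendFailure::DuplicatePayment` come from this file.

use lightning::ln::channelmanager::{PaymentId, PaymentSendFailure};
use lightning::ln::{PaymentHash, PaymentSecret};
use lightning::routing::router::Route;

/// Sends at most one payment for the given invoice, keying the PaymentId off the
/// PaymentHash so a concurrent or crashed-and-restarted caller cannot double-pay.
fn pay_invoice_once(
	manager: &MyChannelManager, route: &Route,
	payment_hash: PaymentHash, payment_secret: PaymentSecret,
) -> Result<(), PaymentSendFailure> {
	// Derive the PaymentId from the PaymentHash, as the docs above suggest.
	let payment_id = PaymentId(payment_hash.0);
	match manager.send_payment(route, payment_hash, &Some(payment_secret), payment_id) {
		// A payment with this PaymentId is already in flight: treat as success and wait
		// for the original attempt's Event::PaymentSent or Event::PaymentFailed.
		Err(PaymentSendFailure::DuplicatePayment) => Ok(()),
		other => other,
	}
}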
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result { - self.send_payment_internal(route, payment_hash, payment_secret, None, None, None) + /// + /// [`Event::PaymentSent`]: events::Event::PaymentSent + /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events + pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { + let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret, payment_id, route)?; + self.send_payment_internal(route, payment_hash, payment_secret, None, payment_id, None, onion_session_privs) } - fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: Option, recv_value_msat: Option) -> Result { + #[cfg(test)] + pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option, payment_id: PaymentId, route: &Route) -> Result, PaymentSendFailure> { + self.add_new_pending_payment(payment_hash, payment_secret, payment_id, route) + } + + fn add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option, payment_id: PaymentId, route: &Route) -> Result, PaymentSendFailure> { + let mut onion_session_privs = Vec::with_capacity(route.paths.len()); + for _ in 0..route.paths.len() { + onion_session_privs.push(self.keys_manager.get_secure_random_bytes()); + } + + let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); + match pending_outbounds.entry(payment_id) { + hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment), + hash_map::Entry::Vacant(entry) => { + let payment = entry.insert(PendingOutboundPayment::Retryable { + session_privs: HashSet::new(), + pending_amt_msat: 0, + pending_fee_msat: Some(0), + payment_hash, + payment_secret, + starting_block_height: self.best_block.read().unwrap().height(), + total_msat: route.get_total_amount(), + }); + + for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) { + assert!(payment.insert(*session_priv_bytes, path)); + } + + Ok(onion_session_privs) + }, + } + } + + fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { if route.paths.len() < 1 { - return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); + return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over"})); } if payment_secret.is_none() && route.paths.len() > 1 { return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()})); @@ -2607,15 +2628,14 @@ impl ChannelMana let mut total_value = 0; let our_node_id = self.get_our_node_id(); let mut path_errs = Vec::with_capacity(route.paths.len()); - let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) }; 'path_check: for path in route.paths.iter() { if path.len() < 1 || path.len() > 20 { - path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"})); + path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size"})); continue 
'path_check; } for (idx, hop) in path.iter().enumerate() { if idx != path.len() - 1 && hop.pubkey == our_node_id { - path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"})); + path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us"})); continue 'path_check; } } @@ -2632,8 +2652,28 @@ impl ChannelMana let cur_height = self.best_block.read().unwrap().height() + 1; let mut results = Vec::new(); - for path in route.paths.iter() { - results.push(self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage)); + debug_assert_eq!(route.paths.len(), onion_session_privs.len()); + for (path, session_priv) in route.paths.iter().zip(onion_session_privs.into_iter()) { + let mut path_res = self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv); + match path_res { + Ok(_) => {}, + Err(APIError::MonitorUpdateInProgress) => { + // While a MonitorUpdateInProgress is an Err(_), the payment is still + // considered "in flight" and we shouldn't remove it from the + // PendingOutboundPayment set. + }, + Err(_) => { + let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); + if let Some(payment) = pending_outbounds.get_mut(&payment_id) { + let removed = payment.remove(&session_priv, Some(path)); + debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); + } else { + debug_assert!(false, "This can't happen as the payment was added by callers"); + path_res = Err(APIError::APIMisuseError { err: "Internal error: payment disappeared during processing. Please report this bug!".to_owned() }); + } + } + } + results.push(path_res); } let mut has_ok = false; let mut has_err = false; @@ -2667,12 +2707,13 @@ impl ChannelMana } else { None }, }) } else if has_err { - // If we failed to send any paths, we shouldn't have inserted the new PaymentId into - // our `pending_outbound_payments` map at all. - debug_assert!(self.pending_outbound_payments.lock().unwrap().get(&payment_id).is_none()); - Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect())) + // If we failed to send any paths, we should remove the new PaymentId from the + // `pending_outbound_payments` map, as the user isn't expected to `abandon_payment`. + let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some(); + debug_assert!(removed, "We should always have a pending payment to remove here"); + Err(PaymentSendFailure::AllFailedResendSafe(results.drain(..).map(|r| r.unwrap_err()).collect())) } else { - Ok(payment_id) + Ok(()) } } @@ -2696,57 +2737,74 @@ impl ChannelMana } } + let mut onion_session_privs = Vec::with_capacity(route.paths.len()); + for _ in 0..route.paths.len() { + onion_session_privs.push(self.keys_manager.get_secure_random_bytes()); + } + let (total_msat, payment_hash, payment_secret) = { - let outbounds = self.pending_outbound_payments.lock().unwrap(); - if let Some(payment) = outbounds.get(&payment_id) { - match payment { - PendingOutboundPayment::Retryable { - total_msat, payment_hash, payment_secret, pending_amt_msat, .. 
- } => { - let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum(); - if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + match outbounds.get_mut(&payment_id) { + Some(payment) => { + let res = match payment { + PendingOutboundPayment::Retryable { + total_msat, payment_hash, payment_secret, pending_amt_msat, .. + } => { + let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum(); + if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string() + })) + } + (*total_msat, *payment_hash, *payment_secret) + }, + PendingOutboundPayment::Legacy { .. } => { return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { - err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string() + err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string() })) - } - (*total_msat, *payment_hash, *payment_secret) - }, - PendingOutboundPayment::Legacy { .. } => { - return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { - err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string() - })) - }, - PendingOutboundPayment::Fulfilled { .. } => { - return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { - err: "Payment already completed".to_owned() - })); - }, - PendingOutboundPayment::Abandoned { .. } => { - return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { - err: "Payment already abandoned (with some HTLCs still pending)".to_owned() - })); - }, - } - } else { - return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { - err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)), - })) + }, + PendingOutboundPayment::Fulfilled { .. } => { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "Payment already completed".to_owned() + })); + }, + PendingOutboundPayment::Abandoned { .. } => { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "Payment already abandoned (with some HTLCs still pending)".to_owned() + })); + }, + }; + for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) { + assert!(payment.insert(*session_priv_bytes, path)); + } + res + }, + None => + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)), + })), } }; - return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ()) + self.send_payment_internal(route, payment_hash, &payment_secret, None, payment_id, Some(total_msat), onion_session_privs) } /// Signals that no further retries for the given payment will occur. /// - /// After this method returns, any future calls to [`retry_payment`] for the given `payment_id` - /// will fail with [`PaymentSendFailure::ParameterError`]. 
If no such event has been generated,
-	/// an [`Event::PaymentFailed`] event will be generated as soon as there are no remaining
-	/// pending HTLCs for this payment.
+	/// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
+	/// are allowed. If no [`Event::PaymentFailed`] event had been generated before, one will be
+	/// generated as soon as there are no remaining pending HTLCs for this payment.
 	///
 	/// Note that calling this method does *not* prevent a payment from succeeding. You must still
 	/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
 	/// determine the ultimate status of a payment.
 	///
+	/// If an [`Event::PaymentFailed`] event is generated and we restart without this
+	/// [`ChannelManager`] having been persisted, the payment may still be in the pending state
+	/// upon restart. This allows further calls to [`retry_payment`] (and will require a second
+	/// call to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls
+	/// to [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+	///
+	/// [`abandon_payment`]: Self::abandon_payment
 	/// [`retry_payment`]: Self::retry_payment
 	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
@@ -2773,7 +2831,8 @@ impl ChannelMana
 	/// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
 	/// never reach the recipient.
 	///
-	/// See [`send_payment`] documentation for more details on the return value of this function.
+	/// See [`send_payment`] documentation for more details on the return value of this function
+	/// and the idempotency guarantees provided by the [`PaymentId`] key.
 	///
 	/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
 	/// [`send_payment`] for more information about the risks of duplicate preimage usage.
 	///
 	/// Note that `route` must have exactly one path.
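The retry/abandon lifecycle documented just above typically shows up in a consumer as an event-handler arm along these lines. Hedged sketch only: `find_updated_route` and `retries_left` are hypothetical caller-side bookkeeping; `retry_payment` and `abandon_payment` are the APIs from this file.

// Inside your Event handler (error handling elided):
Event::PaymentPathFailed { payment_id: Some(payment_id), payment_failed_permanently, retry, .. } => {
	match (payment_failed_permanently, retry) {
		// Transient failure and a retry is possible: re-route using the returned
		// RouteParameters (which record the failed channel) and try again.
		(false, Some(route_params)) if retries_left(&payment_id) => {
			if let Ok(route) = find_updated_route(&route_params) {
				let _ = channel_manager.retry_payment(&route, payment_id);
			}
		},
		// Give up: no further retry_payment calls are allowed afterwards, and an
		// Event::PaymentFailed fires once no HTLCs for this payment remain pending.
		_ => channel_manager.abandon_payment(payment_id),
	}
},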
/// /// [`send_payment`]: Self::send_payment - pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { + pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, payment_id: PaymentId) -> Result { let preimage = match payment_preimage { Some(p) => p, None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()), }; let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner()); - match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) { - Ok(payment_id) => Ok((payment_hash, payment_id)), + let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?; + + match self.send_payment_internal(route, payment_hash, &None, Some(preimage), payment_id, None, onion_session_privs) { + Ok(()) => Ok(payment_hash), Err(e) => Err(e) } } @@ -2808,9 +2869,10 @@ impl ChannelMana } let route = Route { paths: vec![hops], payment_params: None }; + let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?; - match self.send_payment_internal(&route, payment_hash, &None, None, Some(payment_id), None) { - Ok(payment_id) => Ok((payment_hash, payment_id)), + match self.send_payment_internal(&route, payment_hash, &None, None, payment_id, None, onion_session_privs) { + Ok(()) => Ok((payment_hash, payment_id)), Err(e) => Err(e) } } @@ -2832,7 +2894,7 @@ impl ChannelMana /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. - fn funding_transaction_generated_intern, &Transaction) -> Result>( + fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( &self, temporary_channel_id: &[u8; 32], _counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { let (chan, msg) = { @@ -3030,6 +3092,102 @@ impl ChannelMana Ok(()) } + /// Attempts to forward an intercepted HTLC over the provided channel id and with the provided + /// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event. + /// + /// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time + /// channel to a receiving node if the node lacks sufficient inbound liquidity. + /// + /// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use + /// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the + /// receiver's invoice route hints. These route hints will signal to LDK to generate an + /// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or + /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event. + /// + /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop + /// you from forwarding more than you received. + /// + /// Errors if the event was not handled in time, in which case the HTLC was automatically failed + /// backwards. 
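For the interception flow this introduces, an LSP-style node's event loop might respond roughly as below. This is a sketch under stated assumptions: `scid_to_channel` and `LSP_FEE_MSAT` are hypothetical caller-side state, and the `Event::HTLCIntercepted` fields are taken from the accompanying event definition, which is not shown in this hunk.

// Inside your Event handler, on a node with UserConfig::accept_intercept_htlcs set:
Event::HTLCIntercepted {
	intercept_id, requested_next_hop_scid, expected_outbound_amount_msat, ..
} => {
	// Map the fake SCID previously handed out via get_intercept_scid() back to a real
	// (channel_id, counterparty) pair, possibly after opening a just-in-time channel.
	match scid_to_channel(requested_next_hop_scid) {
		Some((next_hop_channel_id, next_node_id)) => {
			// LDK does not enforce a fee here, so skim it explicitly before forwarding.
			let amt = expected_outbound_amount_msat.saturating_sub(LSP_FEE_MSAT);
			// On Err the HTLC has already been failed back (e.g. handled too late).
			let _ = channel_manager.forward_intercepted_htlc(
				intercept_id, &next_hop_channel_id, next_node_id, amt);
		},
		// Unknown SCID: fail the HTLC back to the sender.
		None => { let _ = channel_manager.fail_intercepted_htlc(intercept_id); },
	}
},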
+ /// + /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs + /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted + // TODO: when we move to deciding the best outbound channel at forward time, only take + // `next_node_id` and not `next_hop_channel_id` + pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], _next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> { + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + + let next_hop_scid = match self.channel_state.lock().unwrap().by_id.get(next_hop_channel_id) { + Some(chan) => { + if !chan.is_usable() { + return Err(APIError::ChannelUnavailable { + err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) + }) + } + chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()) + }, + None => return Err(APIError::ChannelUnavailable { + err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id)) + }) + }; + + let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id) + .ok_or_else(|| APIError::APIMisuseError { + err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) + })?; + + let routing = match payment.forward_info.routing { + PendingHTLCRouting::Forward { onion_packet, .. } => { + PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid } + }, + _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted + }; + let pending_htlc_info = PendingHTLCInfo { + outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info + }; + + let mut per_source_pending_forward = [( + payment.prev_short_channel_id, + payment.prev_funding_outpoint, + payment.prev_user_channel_id, + vec![(pending_htlc_info, payment.prev_htlc_id)] + )]; + self.forward_htlcs(&mut per_source_pending_forward); + Ok(()) + } + + /// Fails the intercepted HTLC indicated by intercept_id. Should only be called in response to + /// an [`HTLCIntercepted`] event. See [`ChannelManager::forward_intercepted_htlc`]. + /// + /// Errors if the event was not handled in time, in which case the HTLC was automatically failed + /// backwards. + /// + /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted + pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> { + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + + let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id) + .ok_or_else(|| APIError::APIMisuseError { + err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) + })?; + + if let PendingHTLCRouting::Forward { short_channel_id, .. 
} = payment.forward_info.routing { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: payment.prev_short_channel_id, + outpoint: payment.prev_funding_outpoint, + htlc_id: payment.prev_htlc_id, + incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, + phantom_shared_secret: None, + }); + + let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10); + let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id }; + self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination); + } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted + + Ok(()) + } + /// Processes HTLCs which are pending waiting on random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. @@ -3039,97 +3197,97 @@ impl ChannelMana let mut new_events = Vec::new(); let mut failed_forwards = Vec::new(); - let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); - let mut handle_errors = Vec::new(); + let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { let mut forward_htlcs = HashMap::new(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); for (short_chan_id, mut pending_forwards) in forward_htlcs { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; if short_chan_id != 0 { - let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) { - Some((_cp_id, chan_id)) => chan_id.clone(), - None => { + macro_rules! forwarding_channel_not_found { + () => { for forward_info in pending_forwards.drain(..) { match forward_info { - HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, - prev_funding_outpoint } => { - macro_rules! failure_handler { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { - log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); - - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - phantom_shared_secret: $phantom_ss, - }); - - let reason = if $next_hop_unknown { - HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id } - } else { - HTLCDestination::FailedPayment{ payment_hash } - }; - - failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }, - reason - )); - continue; - } + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, + forward_info: PendingHTLCInfo { + routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, + outgoing_cltv_value, incoming_amt_msat: _ + } + }) => { + macro_rules! 
failure_handler { + ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { + log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); + + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + phantom_shared_secret: $phantom_ss, + }); + + let reason = if $next_hop_unknown { + HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id } + } else { + HTLCDestination::FailedPayment{ payment_hash } + }; + + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::reason($err_code, $err_data), + reason + )); + continue; } - macro_rules! fail_forward { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { - { - failure_handler!($msg, $err_code, $err_data, $phantom_ss, true); - } + } + macro_rules! fail_forward { + ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + { + failure_handler!($msg, $err_code, $err_data, $phantom_ss, true); } } - macro_rules! failed_payment { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { - { - failure_handler!($msg, $err_code, $err_data, $phantom_ss, false); - } + } + macro_rules! failed_payment { + ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + { + failure_handler!($msg, $err_code, $err_data, $phantom_ss, false); } } - if let PendingHTLCRouting::Forward { onion_packet, .. } = routing { - let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode); - if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) { - let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes(); - let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { - Ok(res) => res, - Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { - let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner(); - // In this scenario, the phantom would have sent us an - // `update_fail_malformed_htlc`, meaning here we encrypt the error as - // if it came from us (the second-to-last hop) but contains the sha256 - // of the onion. - failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None); - }, - Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => { - failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); - }, - }; - match next_hop { - onion_utils::Hop::Receive(hop_data) => { - match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) { - Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])), - Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) - } - }, - _ => panic!(), - } - } else { - fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None); + } + if let PendingHTLCRouting::Forward { onion_packet, .. 
} = routing { + let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode); + if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) { + let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes(); + let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { + Ok(res) => res, + Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { + let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner(); + // In this scenario, the phantom would have sent us an + // `update_fail_malformed_htlc`, meaning here we encrypt the error as + // if it came from us (the second-to-last hop) but contains the sha256 + // of the onion. + failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None); + }, + Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => { + failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); + }, + }; + match next_hop { + onion_utils::Hop::Receive(hop_data) => { + match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) { + Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), + Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) + } + }, + _ => panic!(), } } else { fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None); } - }, + } else { + fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None); + } + }, HTLCForwardInfo::FailHTLC { .. } => { // Channel went away before we could fail it. This implies // the channel is now on chain and our counterparty is @@ -3138,30 +3296,45 @@ impl ChannelMana } } } + } + } + let forward_chan_id = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) { + Some((_cp_id, chan_id)) => chan_id.clone(), + None => { + forwarding_channel_not_found!(); continue; } }; - if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) { - let mut add_htlc_msgs = Vec::new(); - let mut fail_htlc_msgs = Vec::new(); - for forward_info in pending_forwards.drain(..) { - match forward_info { - HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing: PendingHTLCRouting::Forward { - onion_packet, .. - }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, - prev_funding_outpoint } => { - log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - // Phantom payments are only PendingHTLCRouting::Receive. 
- phantom_shared_secret: None, - }); - match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) { - Err(e) => { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + match channel_state.by_id.entry(forward_chan_id) { + hash_map::Entry::Vacant(_) => { + forwarding_channel_not_found!(); + continue; + }, + hash_map::Entry::Occupied(mut chan) => { + for forward_info in pending_forwards.drain(..) { + match forward_info { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _, + forward_info: PendingHTLCInfo { + incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, + routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _, + }, + }) => { + log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + // Phantom payments are only PendingHTLCRouting::Receive. + phantom_shared_secret: None, + }); + if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat, + payment_hash, outgoing_cltv_value, htlc_source.clone(), + onion_packet, &self.logger) + { if let ChannelError::Ignore(msg) = e { log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); } else { @@ -3169,112 +3342,44 @@ impl ChannelMana } let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code, data }, + HTLCFailReason::reason(failure_code, data), HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } )); continue; - }, - Ok(update_add) => { - match update_add { - Some(msg) => { add_htlc_msgs.push(msg); }, - None => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can add anymore HTLCs. The Channel - // will automatically handle building the update_add_htlc and - // commitment_signed messages when we can. - // TODO: Do some kind of timer to set the channel as !is_live() - // as we don't really want others relying on us relaying through - // this channel currently :/. - } - } } - } - }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); - }, - HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { - log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { - Err(e) => { + }, + HTLCForwardInfo::AddHTLC { .. 
} => { + panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); + }, + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { + log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); + if let Err(e) = chan.get_mut().queue_fail_htlc( + htlc_id, err_packet, &self.logger + ) { if let ChannelError::Ignore(msg) = e { log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); } else { - panic!("Stated return value requirements in get_update_fail_htlc() were not met"); + panic!("Stated return value requirements in queue_fail_htlc() were not met"); } // fail-backs are best-effort, we probably already have one // pending, and if not that's OK, if not, the channel is on // the chain and sending the HTLC-Timeout is their problem. continue; - }, - Ok(Some(msg)) => { fail_htlc_msgs.push(msg); }, - Ok(None) => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can update the commitment - // transaction. The Channel will automatically handle - // building the update_fail_htlc and commitment_signed - // messages when we can. - // We don't need any kind of timer here as they should fail - // the channel onto the chain if they can't get our - // update_fail_htlc in time, it's not our problem. - } - } - }, - } - } - - if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() { - let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { - Ok(res) => res, - Err(e) => { - // We surely failed send_commitment due to bad keys, in that case - // close channel and then send error message to peer. - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let err: Result<(), _> = match e { - ChannelError::Ignore(_) | ChannelError::Warn(_) => { - panic!("Stated return value requirements in send_commitment() were not met"); } - ChannelError::Close(msg) => { - log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); - let mut channel = remove_channel!(self, channel_state, chan); - // ChannelClosed event is generated by handle_error for us. - Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) - }, - }; - handle_errors.push((counterparty_node_id, err)); - continue; - } - }; - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - ChannelMonitorUpdateStatus::Completed => {}, - e => { - handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); - continue; + }, } } - log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", - add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: add_htlc_msgs, - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: fail_htlc_msgs, - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: commitment_msg, - }, - }); } - } else { - unreachable!(); } } else { for forward_info in pending_forwards.drain(..) 
{ match forward_info { - HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing, incoming_shared_secret, payment_hash, amt_to_forward, .. }, - prev_funding_outpoint } => { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, + forward_info: PendingHTLCInfo { + routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, .. + } + }) => { let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing { PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => { let _legacy_hop_data = Some(payment_data.clone()); @@ -3294,18 +3399,18 @@ impl ChannelMana incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret, }, - value: amt_to_forward, + value: outgoing_amt_msat, timer_ticks: 0, - total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward }, + total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat }, cltv_expiry, onion_payload, }; macro_rules! fail_htlc { ($htlc: expr, $payment_hash: expr) => { - let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec(); + let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice( - &byte_utils::be32_to_array(self.best_block.read().unwrap().height()), + &self.best_block.read().unwrap().height().to_be_bytes(), ); failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: $htlc.prev_hop.short_channel_id, @@ -3314,22 +3419,33 @@ impl ChannelMana incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, }), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, + HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: $payment_hash }, )); } } + let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret; + let mut receiver_node_id = self.our_network_pubkey; + if phantom_shared_secret.is_some() { + receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode) + .expect("Failed to get node_id for phantom node recipient"); + } macro_rules! 
check_total_value { ($payment_data: expr, $payment_preimage: expr) => {{ - let mut payment_received_generated = false; + let mut payment_claimable_generated = false; let purpose = || { events::PaymentPurpose::InvoicePayment { payment_preimage: $payment_preimage, payment_secret: $payment_data.payment_secret, } }; - let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash) + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { + fail_htlc!(claimable_htlc, payment_hash); + continue + } + let (_, htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash) .or_insert_with(|| (purpose(), Vec::new())); if htlcs.len() == 1 { if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { @@ -3358,20 +3474,24 @@ impl ChannelMana log_bytes!(payment_hash.0), total_value, $payment_data.total_msat); fail_htlc!(claimable_htlc, payment_hash); } else if total_value == $payment_data.total_msat { + let prev_channel_id = prev_funding_outpoint.to_channel_id(); htlcs.push(claimable_htlc); - new_events.push(events::Event::PaymentReceived { + new_events.push(events::Event::PaymentClaimable { + receiver_node_id: Some(receiver_node_id), payment_hash, purpose: purpose(), amount_msat: total_value, + via_channel_id: Some(prev_channel_id), + via_user_channel_id: Some(prev_user_channel_id), }); - payment_received_generated = true; + payment_claimable_generated = true; } else { // Nothing to do - we haven't reached the total // payment value yet, wait until we receive more // MPP parts. htlcs.push(claimable_htlc); } - payment_received_generated + payment_claimable_generated }} } @@ -3397,14 +3517,23 @@ impl ChannelMana check_total_value!(payment_data, payment_preimage); }, OnionPayload::Spontaneous(preimage) => { - match channel_state.claimable_htlcs.entry(payment_hash) { + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { + fail_htlc!(claimable_htlc, payment_hash); + continue + } + match claimable_payments.claimable_htlcs.entry(payment_hash) { hash_map::Entry::Vacant(e) => { let purpose = events::PaymentPurpose::SpontaneousPayment(preimage); e.insert((purpose.clone(), vec![claimable_htlc])); - new_events.push(events::Event::PaymentReceived { + let prev_channel_id = prev_funding_outpoint.to_channel_id(); + new_events.push(events::Event::PaymentClaimable { + receiver_node_id: Some(receiver_node_id), payment_hash, - amount_msat: amt_to_forward, + amount_msat: outgoing_amt_msat, purpose, + via_channel_id: Some(prev_channel_id), + via_user_channel_id: Some(prev_user_channel_id), }); }, hash_map::Entry::Occupied(_) => { @@ -3430,8 +3559,8 @@ impl ChannelMana log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); fail_htlc!(claimable_htlc, payment_hash); } else { - let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage); - if payment_received_generated { + let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage); + if payment_claimable_generated { inbound_payment.remove_entry(); } } @@ -3448,13 +3577,15 @@ impl ChannelMana } for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) 
{ - self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination); + self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); } self.forward_htlcs(&mut phantom_receives); - for (counterparty_node_id, err) in handle_errors.drain(..) { - let _ = handle_error!(self, err, counterparty_node_id); - } + // Freeing the holding cell here is relatively redundant - in practice we'll do it when we + // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's + // nice to do the work now if we can rather than while we're trying to get messages in the + // network stack. + self.check_free_holding_cells(); if new_events.is_empty() { return } let mut events = self.pending_events.lock().unwrap(); @@ -3492,59 +3623,24 @@ impl ChannelMana self.process_background_events(); } - fn update_channel_fee(&self, short_to_chan_info: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { - if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } + fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { + if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); - return (true, NotifyOption::SkipPersist, Ok(())); + return NotifyOption::SkipPersist; } if !chan.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); - return (true, NotifyOption::SkipPersist, Ok(())); + return NotifyOption::SkipPersist; } log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); - let mut retain_channel = true; - let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { - Ok(res) => Ok(res), - Err(e) => { - let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); - if drop { retain_channel = false; } - Err(res) - } - }; - let ret_err = match res { - Ok(Some((update_fee, commitment_signed, monitor_update))) => { - match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - ChannelMonitorUpdateStatus::Completed => { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, - }); - Ok(()) - }, - e => { - let (res, drop) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); - if drop { retain_channel = false; } - res - } - } - }, - Ok(None) => Ok(()), - Err(e) => Err(e), - }; - (retain_channel, NotifyOption::DoPersist, ret_err) + chan.queue_update_fee(new_feerate, &self.logger); + NotifyOption::DoPersist } #[cfg(fuzzing)] @@ -3558,26 +3654,55 @@ impl ChannelMana let new_feerate = 
self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let mut handle_errors = Vec::new(); - { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_chan_info = &mut channel_state.short_to_chan_info; - channel_state.by_id.retain(|chan_id, chan| { - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push(err); - } - retain_channel - }); + let mut channel_state = self.channel_state.lock().unwrap(); + for (chan_id, chan) in channel_state.by_id.iter_mut() { + let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } } should_persist }); } + fn remove_stale_resolved_payments(&self) { + // If an outbound payment was completed, and no pending HTLCs remain, we should remove it + // from the map. However, if we did that immediately when the last payment HTLC is claimed, + // this could race the user making a duplicate send_payment call and our idempotency + // guarantees would be violated. Instead, we wait a few timer ticks to do the actual + // removal. This should be more than sufficient to ensure the idempotency of any + // `send_payment` calls that were made at the same time the `PaymentSent` event was being + // processed. + let mut pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); + let pending_events = self.pending_events.lock().unwrap(); + pending_outbound_payments.retain(|payment_id, payment| { + if let PendingOutboundPayment::Fulfilled { session_privs, timer_ticks_without_htlcs, .. } = payment { + let mut no_remaining_entries = session_privs.is_empty(); + if no_remaining_entries { + for ev in pending_events.iter() { + match ev { + events::Event::PaymentSent { payment_id: Some(ev_payment_id), .. } | + events::Event::PaymentPathSuccessful { payment_id: ev_payment_id, .. } | + events::Event::PaymentPathFailed { payment_id: Some(ev_payment_id), .. } => { + if payment_id == ev_payment_id { + no_remaining_entries = false; + break; + } + }, + _ => {}, + } + } + } + if no_remaining_entries { + *timer_ticks_without_htlcs += 1; + *timer_ticks_without_htlcs <= IDEMPOTENCY_TIMEOUT_TICKS + } else { + *timer_ticks_without_htlcs = 0; + true + } + } else { true } + }); + } + /// Performs actions which should happen on startup and roughly once per minute thereafter. 
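Note that the `IDEMPOTENCY_TIMEOUT_TICKS` window in `remove_stale_resolved_payments` above only advances when `timer_tick_occurred` is actually called. If you do not run `lightning-background-processor` (which drives this for you), you need a driver loop; a sketch assuming a tokio runtime and an `Arc`'d manager:

let manager = Arc::clone(&channel_manager);
tokio::spawn(async move {
	// One tick per minute, matching timer_tick_occurred's documented cadence.
	let mut interval = tokio::time::interval(std::time::Duration::from_secs(60));
	loop {
		interval.tick().await;
		// Drives fee updates, closing-negotiation checks, MPP timeouts, holding-cell
		// frees and the stale-resolved-payment sweep shown above.
		manager.timer_tick_occurred();
	}
});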
/// /// This currently includes: @@ -3597,24 +3722,18 @@ impl ChannelMana let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let mut handle_errors = Vec::new(); + let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_chan_info = &mut channel_state.short_to_chan_info; channel_state.by_id.retain(|chan_id, chan| { - let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); + let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push((err, counterparty_node_id)); - } - if !retain_channel { return false; } if let Err(e) = chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); + let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id); handle_errors.push((Err(err), chan.get_counterparty_node_id())); if needs_close { return false; } } @@ -3649,49 +3768,61 @@ impl ChannelMana true }); + } - channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { - if htlcs.is_empty() { - // This should be unreachable - debug_assert!(false); + self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| { + if htlcs.is_empty() { + // This should be unreachable + debug_assert!(false); + return false; + } + if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload { + // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat). + // In this case we're not going to handle any timeouts of the parts here. + if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) { + return true; + } else if htlcs.into_iter().any(|htlc| { + htlc.timer_ticks += 1; + return htlc.timer_ticks >= MPP_TIMEOUT_TICKS + }) { + timed_out_mpp_htlcs.extend(htlcs.drain(..).map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash))); return false; } - if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload { - // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat). - // In this case we're not going to handle any timeouts of the parts here. - if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) { - return true; - } else if htlcs.into_iter().any(|htlc| { - htlc.timer_ticks += 1; - return htlc.timer_ticks >= MPP_TIMEOUT_TICKS - }) { - timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone()))); - return false; - } - } - true - }); - } + } + true + }); for htlc_source in timed_out_mpp_htlcs.drain(..) 
{ + let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); + let reason = HTLCFailReason::from_failure_code(23); let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; - self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver ); + self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } for (err, counterparty_node_id) in handle_errors.drain(..) { let _ = handle_error!(self, err, counterparty_node_id); } + + self.remove_stale_resolved_payments(); + + // Technically we don't need to do this here, but if we have holding cell entries in a + // channel that need freeing, it's better to do that here and block a background task + // than block the message queueing pipeline. + if self.check_free_holding_cells() { + should_persist = NotifyOption::DoPersist; + } + should_persist }); } /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect - /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources + /// after a PaymentClaimable event, failing the HTLC back to its origin and freeing resources /// along the path (including in our own channel on which we received it). /// /// Note that in some cases around unclean shutdown, it is possible the payment may have /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a - /// second copy of) the [`events::Event::PaymentReceived`] event. Alternatively, the payment + /// second copy of) the [`events::Event::PaymentClaimable`] event. Alternatively, the payment /// may have already been failed automatically by LDK if it was nearing its expiration time. /// /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling @@ -3701,19 +3832,15 @@ impl ChannelMana pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let removed_source = { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.claimable_htlcs.remove(payment_hash) - }; + let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash); if let Some((_, mut sources)) = removed_source { for htlc in sources.drain(..) 
{ - let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); - htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( - self.best_block.read().unwrap().height())); - self.fail_htlc_backwards_internal( - HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, - HTLCDestination::FailedPayment { payment_hash: *payment_hash }); + let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); + htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + let source = HTLCSource::PreviousHopData(htlc.prev_hop); + let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); + let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash }; + self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } } } @@ -3723,7 +3850,7 @@ impl ChannelMana /// /// This is for failures on the channel on which the HTLC was *received*, not failures /// forwarding - fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel) -> (u16, Vec) { + fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<::Signer>) -> (u16, Vec) { // We can't be sure what SCID was used when relaying inbound towards us, so we have to // guess somewhat. If its a public channel, we figure best to just use the real SCID (as // we're not leaking that we have a channel with the counterparty), otherwise we try to use @@ -3743,7 +3870,7 @@ impl ChannelMana /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code /// that we want to return and a channel. - fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel) -> (u16, Vec) { + fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<::Signer>) -> (u16, Vec) { debug_assert_eq!(desired_err_code & 0x1000, 0x1000); if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) { let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6)); @@ -3772,23 +3899,24 @@ impl ChannelMana &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32], counterparty_node_id: &PublicKey ) { - for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { - let (failure_code, onion_failure_data) = - match self.channel_state.lock().unwrap().by_id.entry(channel_id) { - hash_map::Entry::Occupied(chan_entry) => { - self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) - }, - hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) - }; + let (failure_code, onion_failure_data) = + match self.channel_state.lock().unwrap().by_id.entry(channel_id) { + hash_map::Entry::Occupied(chan_entry) => { + self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) + }, + hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) + }; + for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { + let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone()); let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; - self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver); + self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } } /// Fails an HTLC backwards to the sender of it to us. 
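Since `fail_htlc_backwards` is the reject half of handling an `Event::PaymentClaimable`, the typical receive path pairs it with `claim_funds`, roughly as follows. Sketch only: the `expected_amt_msat` lookup is the caller's own invoice state.

// Inside your Event handler:
Event::PaymentClaimable { payment_hash, amount_msat, purpose, .. } => {
	let preimage = match purpose {
		PaymentPurpose::InvoicePayment { payment_preimage, .. } => payment_preimage,
		PaymentPurpose::SpontaneousPayment(preimage) => Some(preimage),
	};
	match (preimage, expected_amt_msat(&payment_hash)) {
		// The payment matches what we invoiced: claim it.
		(Some(preimage), Some(expected)) if amount_msat >= expected =>
			channel_manager.claim_funds(preimage),
		// Unknown preimage or wrong amount: fail the HTLCs back to their origin,
		// freeing the held resources along the path.
		_ => channel_manager.fail_htlc_backwards(&payment_hash),
	}
},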
/// Note that we do not assume that channels corresponding to failed HTLCs are still available. - fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason,destination: HTLCDestination) { + fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { #[cfg(debug_assertions)] { // Ensure that the `channel_state` lock is not held when calling this function. @@ -3807,13 +3935,13 @@ impl ChannelMana // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, session_priv, payment_id, ref payment_params, .. } => { + HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); let mut all_paths_failed = false; let mut full_failure_ev = None; - if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) { if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; @@ -3826,7 +3954,7 @@ impl ChannelMana all_paths_failed = true; if payment.get().abandoned() { full_failure_ev = Some(events::Event::PaymentFailed { - payment_id, + payment_id: *payment_id, payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"), }); payment.remove(); @@ -3846,126 +3974,69 @@ impl ChannelMana } else { None }; log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0)); - let path_failure = match &onion_error { - &HTLCFailReason::LightningError { ref err } => { + let path_failure = { #[cfg(test)] - let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source); #[cfg(not(test))] - let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, _, _) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source); - if self.payment_is_probe(payment_hash, &payment_id) { - if !payment_retryable { - events::Event::ProbeSuccessful { - payment_id, - payment_hash: payment_hash.clone(), - path: path.clone(), - } - } else { - events::Event::ProbeFailed { - payment_id, - payment_hash: payment_hash.clone(), - path: path.clone(), - short_channel_id, - } - } - } else { - // TODO: If we decided to blame ourselves (or one of our channels) in - // process_onion_failure we should close that channel as it implies our - // next-hop is needlessly blaming us! 
- if let Some(scid) = short_channel_id { - retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); - } - events::Event::PaymentPathFailed { - payment_id: Some(payment_id), + if self.payment_is_probe(payment_hash, &payment_id) { + if !payment_retryable { + events::Event::ProbeSuccessful { + payment_id: *payment_id, payment_hash: payment_hash.clone(), - payment_failed_permanently: !payment_retryable, - network_update, - all_paths_failed, path: path.clone(), - short_channel_id, - retry, - #[cfg(test)] - error_code: onion_error_code, - #[cfg(test)] - error_data: onion_error_data - } - } - }, - &HTLCFailReason::Reason { -#[cfg(test)] - ref failure_code, -#[cfg(test)] - ref data, - .. } => { - // we get a fail_malformed_htlc from the first hop - // TODO: We'd like to generate a NetworkUpdate for temporary - // failures here, but that would be insufficient as find_route - // generally ignores its view of our own channels as we provide them via - // ChannelDetails. - // TODO: For non-temporary failures, we really should be closing the - // channel here as we apparently can't relay through them anyway. - let scid = path.first().unwrap().short_channel_id; - retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); - - if self.payment_is_probe(payment_hash, &payment_id) { - events::Event::ProbeFailed { - payment_id, - payment_hash: payment_hash.clone(), - path: path.clone(), - short_channel_id: Some(scid), } } else { - events::Event::PaymentPathFailed { - payment_id: Some(payment_id), + events::Event::ProbeFailed { + payment_id: *payment_id, payment_hash: payment_hash.clone(), - payment_failed_permanently: false, - network_update: None, - all_paths_failed, path: path.clone(), - short_channel_id: Some(scid), - retry, -#[cfg(test)] - error_code: Some(*failure_code), -#[cfg(test)] - error_data: Some(data.clone()), + short_channel_id, } } + } else { + // TODO: If we decided to blame ourselves (or one of our channels) in + // process_onion_failure we should close that channel as it implies our + // next-hop is needlessly blaming us! 
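The probe branch above encodes how probe results are judged: a probe counts as successful exactly when the failure is *not* retryable, i.e. the HTLC reached the destination and was rejected there as expected, while a retryable failure means it died somewhere en route. Restated compactly, with stand-in types (the real code emits `events::Event::ProbeSuccessful` / `ProbeFailed` with more fields):

    enum ProbeOutcome { Successful, Failed { short_channel_id: Option<u64> } }

    // A final (non-retryable) failure means the probe made it to the far end.
    fn classify_probe(payment_retryable: bool, short_channel_id: Option<u64>) -> ProbeOutcome {
        if !payment_retryable {
            ProbeOutcome::Successful
        } else {
            ProbeOutcome::Failed { short_channel_id }
        }
    }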
+ if let Some(scid) = short_channel_id { + retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); + } + events::Event::PaymentPathFailed { + payment_id: Some(*payment_id), + payment_hash: payment_hash.clone(), + payment_failed_permanently: !payment_retryable, + network_update, + all_paths_failed, + path: path.clone(), + short_channel_id, + retry, + #[cfg(test)] + error_code: onion_error_code, + #[cfg(test)] + error_data: onion_error_data + } } }; let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(path_failure); if let Some(ev) = full_failure_ev { pending_events.push(ev); } }, - HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => { - let err_packet = match onion_error { - HTLCFailReason::Reason { failure_code, data } => { - log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code); - if let Some(phantom_ss) = phantom_shared_secret { - let phantom_packet = onion_utils::build_failure_packet(&phantom_ss, failure_code, &data[..]).encode(); - let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(&phantom_ss, &phantom_packet); - onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &encrypted_phantom_packet.data[..]) - } else { - let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode(); - onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet) - } - }, - HTLCFailReason::LightningError { err } => { - log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0)); - onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data) - } - }; + HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => { + log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error); + let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret); let mut forward_event = None; let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); if forward_htlcs.is_empty() { forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); } - match forward_htlcs.entry(short_channel_id) { + match forward_htlcs.entry(*short_channel_id) { hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }); + entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }); }, hash_map::Entry::Vacant(entry) => { - entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet })); + entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet })); } } mem::drop(forward_htlcs); @@ -3977,13 +4048,13 @@ impl ChannelMana } pending_events.push(events::Event::HTLCHandlingFailed { prev_channel_id: outpoint.to_channel_id(), - failed_next_destination: destination + failed_next_destination: destination, }); }, } } - /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any + /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any /// [`MessageSendEvent`]s needed to claim the payment. 
/// /// Note that calling this method does *not* guarantee that the payment has been claimed. You @@ -3991,139 +4062,159 @@ impl ChannelMana /// provided to your [`EventHandler`] when [`process_pending_events`] is next called. /// /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or - /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived` + /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable` /// event matches your expectation. If you fail to do so and call this method, you may provide /// the sender "proof-of-payment" when they did not fulfill the full expected payment. /// - /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived + /// [`Event::PaymentClaimable`]: crate::util::events::Event::PaymentClaimable /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed /// [`process_pending_events`]: EventsProvider::process_pending_events /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash - /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events pub fn claim_funds(&self, payment_preimage: PaymentPreimage) { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash); - if let Some((payment_purpose, mut sources)) = removed_source { - assert!(!sources.is_empty()); - - // If we are claiming an MPP payment, we have to take special care to ensure that each - // channel exists before claiming all of the payments (inside one lock). - // Note that channel existance is sufficient as we should always get a monitor update - // which will take care of the real HTLC claim enforcement. - // - // If we find an HTLC which we would need to claim but for which we do not have a - // channel, we will fail all parts of the MPP payment. While we could wait and see if - // the sender retries the already-failed path(s), it should be a pretty rare case where - // we got all the HTLCs and then a channel closed while we were waiting for the user to - // provide the preimage, so worrying too much about the optimal handling isn't worth - // it. 
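The claim-validation loop on both sides of this hunk enforces the MPP rules spelled out in the comments: every part must agree on `total_msat`, and the parts we hold must sum to exactly that value before anything is claimed. A self-contained sketch of the same rule, over an assumed, simplified part type:

    // Hypothetical, simplified stand-in for a claimable HTLC part.
    struct PartialClaim { value_msat: u64, total_msat: u64 }

    // Returns the claimable total only if every part agrees on `total_msat`
    // and the parts actually sum to it; otherwise the claim must be rejected.
    fn validate_mpp_parts(parts: &[PartialClaim]) -> Option<u64> {
        let expected = parts.first()?.total_msat;
        if parts.iter().any(|p| p.total_msat != expected) { return None; }
        let sum: u64 = parts.iter().map(|p| p.value_msat).sum();
        if sum == expected { Some(sum) } else { None }
    }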
- let mut claimable_amt_msat = 0; - let mut expected_amt_msat = None; - let mut valid_mpp = true; - let mut errs = Vec::new(); - let mut claimed_any_htlcs = false; - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - for htlc in sources.iter() { - if let None = channel_state.short_to_chan_info.get(&htlc.prev_hop.short_channel_id) { - valid_mpp = false; - break; + let mut sources = { + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if let Some((payment_purpose, sources)) = claimable_payments.claimable_htlcs.remove(&payment_hash) { + let mut receiver_node_id = self.our_network_pubkey; + for htlc in sources.iter() { + if htlc.prev_hop.phantom_shared_secret.is_some() { + let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode) + .expect("Failed to get node_id for phantom node recipient"); + receiver_node_id = phantom_pubkey; + break; + } } - if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) { - log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!"); - debug_assert!(false); + + let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash, + ClaimingPayment { amount_msat: sources.iter().map(|source| source.value).sum(), + payment_purpose, receiver_node_id, + }); + if dup_purpose.is_some() { + debug_assert!(false, "Shouldn't get a duplicate pending claim event ever"); + log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", + log_bytes!(payment_hash.0)); + } + sources + } else { return; } + }; + debug_assert!(!sources.is_empty()); + + // If we are claiming an MPP payment, we check that all channels which contain a claimable + // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while + // we're claiming (or even after we claim, before the commitment update dance completes), + // it should be a relatively rare race, and we'd rather not claim HTLCs that require us to + // go on-chain (and lose the on-chain fee to do so) than just reject the payment. + // + // Note that we'll still always get our funds - as long as the generated + // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain. + // + // If we find an HTLC which we would need to claim but for which we do not have a + // channel, we will fail all parts of the MPP payment. While we could wait and see if + // the sender retries the already-failed path(s), it should be a pretty rare case where + // we got all the HTLCs and then a channel closed while we were waiting for the user to + // provide the preimage, so worrying too much about the optimal handling isn't worth + // it. + let mut claimable_amt_msat = 0; + let mut expected_amt_msat = None; + let mut valid_mpp = true; + let mut errs = Vec::new(); + let mut channel_state = Some(self.channel_state.lock().unwrap()); + for htlc in sources.iter() { + let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) { + Some((_cp_id, chan_id)) => chan_id.clone(), + None => { valid_mpp = false; break; } - expected_amt_msat = Some(htlc.total_msat); - if let OnionPayload::Spontaneous(_) = &htlc.onion_payload { - // We don't currently support MPP for spontaneous payments, so just check - // that there's one payment here and move on. 
- if sources.len() != 1 { - log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!"); - debug_assert!(false); - valid_mpp = false; - break; - } - } + }; - claimable_amt_msat += htlc.value; - } - if sources.is_empty() || expected_amt_msat.is_none() { - log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); - return; + if let None = channel_state.as_ref().unwrap().by_id.get(&chan_id) { + valid_mpp = false; + break; } - if claimable_amt_msat != expected_amt_msat.unwrap() { - log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", - expected_amt_msat.unwrap(), claimable_amt_msat); - return; + + if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) { + log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!"); + debug_assert!(false); + valid_mpp = false; + break; } - if valid_mpp { - for htlc in sources.drain(..) { - match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) { - ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { - if let msgs::ErrorAction::IgnoreError = err.err.action { - // We got a temporary failure updating monitor, but will claim the - // HTLC when the monitor updating is restored (or on chain). - log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); - claimed_any_htlcs = true; - } else { errs.push((pk, err)); } - }, - ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"), - ClaimFundsFromHop::DuplicateClaim => { - // While we should never get here in most cases, if we do, it likely - // indicates that the HTLC was timed out some time ago and is no longer - // available to be claimed. Thus, it does not make sense to set - // `claimed_any_htlcs`. - }, - ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true, - } + expected_amt_msat = Some(htlc.total_msat); + if let OnionPayload::Spontaneous(_) = &htlc.onion_payload { + // We don't currently support MPP for spontaneous payments, so just check + // that there's one payment here and move on. + if sources.len() != 1 { + log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!"); + debug_assert!(false); + valid_mpp = false; + break; } } - mem::drop(channel_state_lock); - if !valid_mpp { - for htlc in sources.drain(..) 
{
-			let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-			htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
-				self.best_block.read().unwrap().height()));
-			self.fail_htlc_backwards_internal(
-				HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
-				HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
-				HTLCDestination::FailedPayment { payment_hash } );
+
+			claimable_amt_msat += htlc.value;
+		}
+		if sources.is_empty() || expected_amt_msat.is_none() {
+			mem::drop(channel_state);
+			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+			log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
+			return;
+		}
+		if claimable_amt_msat != expected_amt_msat.unwrap() {
+			mem::drop(channel_state);
+			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+			log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
+				expected_amt_msat.unwrap(), claimable_amt_msat);
+			return;
+		}
+		if valid_mpp {
+			for htlc in sources.drain(..) {
+				if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+				if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), htlc.prev_hop,
+					payment_preimage,
+					|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+				{
+					if let msgs::ErrorAction::IgnoreError = err.err.action {
+						// We got a temporary failure updating monitor, but will claim the
+						// HTLC when the monitor updating is restored (or on chain).
+						log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+					} else { errs.push((pk, err)); }
+				}
+			}
-		}
-
-		if claimed_any_htlcs {
-			self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
-				payment_hash,
-				purpose: payment_purpose,
-				amount_msat: claimable_amt_msat,
-			});
+		}
+		mem::drop(channel_state);
+		if !valid_mpp {
+			for htlc in sources.drain(..) {
+				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
+				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+				let source = HTLCSource::PreviousHopData(htlc.prev_hop);
+				let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
+				let receiver = HTLCDestination::FailedPayment { payment_hash };
+				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 			}
+			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+		}

-		// Now we can handle any errors which were generated.
-		for (counterparty_node_id, err) in errs.drain(..) {
-			let res: Result<(), _> = Err(err);
-			let _ = handle_error!(self, res, counterparty_node_id);
-		}
+		// Now we can handle any errors which were generated.
+		for (counterparty_node_id, err) in errs.drain(..) {
+			let res: Result<(), _> = Err(err);
+			let _ = handle_error!(self, res, counterparty_node_id);
 		}
 	}

-	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
+	fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
+		mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
+		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+	-> Result<(), (PublicKey, MsgHandleErrInternal)> {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
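Both failure paths above attach the same error data to code 0x4000|15 (`incorrect_or_unknown_payment_details` in BOLT 4): the HTLC amount in millisatoshis followed by the current chain height, each big-endian. As a sketch:

    // The 12-byte failure data paired with 0x4000|15: an 8-byte big-endian
    // amount, then a 4-byte big-endian block height.
    fn incorrect_payment_details_data(amt_msat: u64, best_block_height: u32) -> Vec<u8> {
        let mut data = amt_msat.to_be_bytes().to_vec();
        data.extend_from_slice(&best_block_height.to_be_bytes());
        data
    }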
- let channel_state = &mut **channel_state_lock; - let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) { - Some((_cp_id, chan_id)) => chan_id.clone(), - None => { - return ClaimFundsFromHop::PrevHopForceClosed - } - }; + let chan_id = prev_hop.outpoint.to_channel_id(); + let channel_state = &mut *channel_state_lock; if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { + let counterparty_node_id = chan.get().get_counterparty_node_id(); match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok(msgs_monitor_option) => { if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { @@ -4133,11 +4224,10 @@ impl ChannelMana log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug }, "Failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, e); - return ClaimFundsFromHop::MonitorUpdateFail( - chan.get().get_counterparty_node_id(), - handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), - Some(htlc_value_msat) - ); + let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(); + mem::drop(channel_state_lock); + self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); + return Err((counterparty_node_id, err)); } } if let Some((msg, commitment_signed)) = msgs { @@ -4155,29 +4245,62 @@ impl ChannelMana } }); } - return ClaimFundsFromHop::Success(htlc_value_msat); + mem::drop(channel_state_lock); + self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); + Ok(()) } else { - return ClaimFundsFromHop::DuplicateClaim; + Ok(()) } }, Err((e, monitor_update)) => { match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { + // TODO: This needs to be handled somehow - if we receive a monitor update + // with a preimage we *must* somehow manage to propagate it to the upstream + // channel, or we must have an ability to receive the same update and try + // again on restart. log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info }, "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", payment_preimage, e); }, } - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id); + let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id); if drop { chan.remove_entry(); } - return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); + mem::drop(channel_state_lock); + self.handle_monitor_update_completion_actions(completion_action(None)); + Err((counterparty_node_id, res)) }, } - } else { unreachable!(); } + } else { + let preimage_update = ChannelMonitorUpdate { + update_id: CLOSED_CHANNEL_UPDATE_ID, + updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage, + }], + }; + // We update the ChannelMonitor on the backward link, after + // receiving an `update_fulfill_htlc` from the forward link. 
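A pattern worth noting in the rewritten claim path, visible throughout this hunk: the `MonitorUpdateCompletionAction` is computed while the channel lock is held, but `handle_monitor_update_completion_actions` only runs after `mem::drop(channel_state_lock)`, so the resulting event handling can never re-enter locked state. A minimal, generic illustration of that discipline (not LDK API, just the shape of it):

    use std::sync::Mutex;

    // Compute the follow-up work under the lock, run it strictly after the
    // guard has been dropped.
    fn with_completion<T, A>(
        state: &Mutex<T>,
        update: impl FnOnce(&mut T) -> A,
        complete: impl FnOnce(A),
    ) {
        let action = {
            let mut guard = state.lock().unwrap();
            update(&mut *guard)
        }; // guard dropped here
        complete(action);
    }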
+			let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update);
+			if update_res != ChannelMonitorUpdateStatus::Completed {
+				// TODO: This needs to be handled somehow - if we receive a monitor update
+				// with a preimage we *must* somehow manage to propagate it to the upstream
+				// channel, or we must have an ability to receive the same event and try
+				// again on restart.
+				log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+					payment_preimage, update_res);
+			}
+			mem::drop(channel_state_lock);
+			// Note that we do process the completion action here. This totally could be a
+			// duplicate claim, but we have no way of knowing without interrogating the
+			// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+			// generally always allowed to be duplicative (and it's specifically noted in
+			// `PaymentForwarded`).
+			self.handle_monitor_update_completion_actions(completion_action(None));
+			Ok(())
+		}
 	}

 	fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
@@ -4198,15 +4321,12 @@ impl ChannelMana
 					}
 				);
 			}
-			if payment.get().remaining_parts() == 0 {
-				payment.remove();
-			}
 			}
 		}
 	}
 }

-	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+	fn claim_funds_internal(&self, channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
 				mem::drop(channel_state_lock);
@@ -4246,10 +4366,6 @@ impl ChannelMana
 					}
 				);
 			}
-
-				if payment.get().remaining_parts() == 0 {
-					payment.remove();
-				}
 				}
 			} else {
 				log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
 			}
 		},
 		HTLCSource::PreviousHopData(hop_data) => {
 			let prev_outpoint = hop_data.outpoint;
-			let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
-			let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
-			let htlc_claim_value_msat = match res {
-				ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
-				ClaimFundsFromHop::Success(amt) => Some(amt),
-				_ => None,
-			};
-			if let ClaimFundsFromHop::PrevHopForceClosed = res {
-				let preimage_update = ChannelMonitorUpdate {
-					update_id: CLOSED_CHANNEL_UPDATE_ID,
-					updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-						payment_preimage: payment_preimage.clone(),
-					}],
-				};
-				// We update the ChannelMonitor on the backward link, after
-				// receiving an offchain preimage event from the forward link (the
-				// event being update_fulfill_htlc).
-				let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
-				if update_res != ChannelMonitorUpdateStatus::Completed {
-					// TODO: This needs to be handled somehow - if we receive a monitor update
-					// with a preimage we *must* somehow manage to propagate it to the upstream
-					// channel, or we must have an ability to receive the same event and try
-					// again on restart.
-					log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-						payment_preimage, update_res);
-				}
-				// Note that we do *not* set `claimed_htlc` to false here.
In fact, this - // totally could be a duplicate claim, but we have no way of knowing - // without interrogating the `ChannelMonitor` we've provided the above - // update to. Instead, we simply document in `PaymentForwarded` that this - // can happen. - } - mem::drop(channel_state_lock); - if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res { + let res = self.claim_funds_from_hop(channel_state_lock, hop_data, payment_preimage, + |htlc_claim_value_msat| { + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { None }; + + let prev_channel_id = Some(prev_outpoint.to_channel_id()); + let next_channel_id = Some(next_channel_id); + + Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded { + fee_earned_msat, + claim_from_onchain_tx: from_onchain, + prev_channel_id, + next_channel_id, + }}) + } else { None } + }); + if let Err((pk, err)) = res { let result: Result<(), _> = Err(err); let _ = handle_error!(self, result, pk); } - - if claimed_htlc { - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { None }; - - let mut pending_events = self.pending_events.lock().unwrap(); - let prev_channel_id = Some(prev_outpoint.to_channel_id()); - let next_channel_id = Some(next_channel_id); - - pending_events.push(events::Event::PaymentForwarded { - fee_earned_msat, - claim_from_onchain_tx: from_onchain, - prev_channel_id, - next_channel_id, - }); - } - } }, } } @@ -4322,10 +4404,91 @@ impl ChannelMana self.our_network_pubkey.clone() } + fn handle_monitor_update_completion_actions>(&self, actions: I) { + for action in actions.into_iter() { + match action { + MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => { + let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment { + self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed { + payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id), + }); + } + }, + MonitorUpdateCompletionAction::EmitEvent { event } => { + self.pending_events.lock().unwrap().push(event); + }, + } + } + } + + /// Handles a channel reentering a functional state, either due to reconnect or a monitor + /// update completion. 
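`handle_channel_resumption`, defined next, replaces the old `handle_chan_restoration_locked!` macro. Its core is queueing the counterparty's `commitment_signed` and `revoke_and_ack` in whichever order the channel state machine requires; a stripped-down sketch of that ordering rule, with assumed stand-in types:

    enum Order { CommitmentFirst, RevokeAndAckFirst }

    // Queue up to two messages in the negotiated order; both orders occur
    // in practice after a reconnect or a monitor-update completion.
    fn queue_in_order<T>(order: Order, commitment: Option<T>, raa: Option<T>, out: &mut Vec<T>) {
        match order {
            Order::CommitmentFirst => { out.extend(commitment); out.extend(raa); }
            Order::RevokeAndAckFirst => { out.extend(raa); out.extend(commitment); }
        }
    }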
+	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
+		channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
+		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
+		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
+	-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+		let mut htlc_forwards = None;
+
+		let counterparty_node_id = channel.get_counterparty_node_id();
+		if !pending_forwards.is_empty() {
+			htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
+				channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+		}
+
+		if let Some(msg) = channel_ready {
+			send_channel_ready!(self, pending_msg_events, channel, msg);
+		}
+		if let Some(msg) = announcement_sigs {
+			pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+				node_id: counterparty_node_id,
+				msg,
+			});
+		}
+
+		emit_channel_ready_event!(self, channel);
+
+		macro_rules! handle_cs { () => {
+			if let Some(update) = commitment_update {
+				pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+					node_id: counterparty_node_id,
+					updates: update,
+				});
+			}
+		} }
+		macro_rules! handle_raa { () => {
+			if let Some(revoke_and_ack) = raa {
+				pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+					node_id: counterparty_node_id,
+					msg: revoke_and_ack,
+				});
+			}
+		} }
+		match order {
+			RAACommitmentOrder::CommitmentFirst => {
+				handle_cs!();
+				handle_raa!();
+			},
+			RAACommitmentOrder::RevokeAndACKFirst => {
+				handle_raa!();
+				handle_cs!();
+			},
+		}
+
+		if let Some(tx) = funding_broadcastable {
+			log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+			self.tx_broadcaster.broadcast_transaction(&tx);
+		}
+
+		htlc_forwards
+	}
+
 	fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

-		let chan_restoration_res;
+		let htlc_forwards;
 		let (mut pending_failures, finalized_claims, counterparty_node_id) = {
 			let mut channel_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_lock;
@@ -4352,18 +4515,20 @@ impl ChannelMana
 				})
 			} else { None }
 		} else { None };
-		chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+		htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
 		if let Some(upd) = channel_update {
 			channel_state.pending_msg_events.push(upd);
 		}
 		(updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
 	};
-	post_handle_chan_restoration!(self, chan_restoration_res);
+	if let Some(forwards) = htlc_forwards {
+		self.forward_htlcs(&mut [forwards][..]);
+	}
 	self.finalize_claims(finalized_claims);
 	for failure in pending_failures.drain(..)
{ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() }; - self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver); + self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver); } } @@ -4383,7 +4548,7 @@ impl ChannelMana /// /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id - pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> { + pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> { self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id) } @@ -4405,11 +4570,11 @@ impl ChannelMana /// /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id - pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> { + pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> { self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id) } - fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u64) -> Result<(), APIError> { + fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let mut channel_state_lock = self.channel_state.lock().unwrap(); @@ -4432,7 +4597,7 @@ impl ChannelMana } }; channel_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel_state, channel); + let _ = remove_channel!(self, channel); return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); } @@ -4457,9 +4622,13 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone())); } + let mut random_bytes = [0u8; 16]; + random_bytes.copy_from_slice(&self.keys_manager.get_secure_random_bytes()[..16]); + let user_channel_id = u128::from_be_bytes(random_bytes); + let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager, - counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration, + counterparty_node_id.clone(), &their_features, msg, user_channel_id, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias) { Err(e) => { @@ -4482,7 +4651,7 @@ impl ChannelMana } channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id: counterparty_node_id.clone(), - msg: channel.accept_inbound_channel(0), + msg: channel.accept_inbound_channel(user_channel_id), }); } else { let 
mut pending_events = self.pending_events.lock().unwrap(); @@ -4512,7 +4681,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan); + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) @@ -4539,7 +4708,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); } - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove()) + (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) } @@ -4592,7 +4761,7 @@ impl ChannelMana msg: funding_msg, }); if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg); + send_channel_ready!(self, channel_state.pending_msg_events, chan, msg); } e.insert(chan); } @@ -4610,14 +4779,14 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) { + let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) { Ok(update) => update, - Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), + Err(e) => try_chan_entry!(self, Err(e), chan), }; match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { ChannelMonitorUpdateStatus::Completed => {}, e => { - let mut res = handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); + let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { // We weren't able to watch the channel to begin with, so no updates should be made on // it. 
Previously, full_stack_target found an (unreachable) panic when the @@ -4630,7 +4799,7 @@ impl ChannelMana }, } if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg); + send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg); } funding_tx }, @@ -4651,7 +4820,7 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), - self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan); + self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan); if let Some(announcement_sigs) = announcement_sigs_opt { log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { @@ -4672,6 +4841,9 @@ impl ChannelMana }); } } + + emit_channel_ready_event!(self, chan.get_mut()); + Ok(()) }, hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -4696,16 +4868,16 @@ impl ChannelMana if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } - let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry); dropped_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update { let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); let (result, is_permanent) = - handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { - remove_channel!(self, channel_state, chan_entry); + remove_channel!(self, chan_entry); break result; } } @@ -4724,7 +4896,8 @@ impl ChannelMana }; for htlc_source in dropped_htlcs.drain(..) 
{
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
-			self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
+			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
 		}

 		let _ = handle_error!(self, result, *counterparty_node_id);
@@ -4740,7 +4913,7 @@ impl ChannelMana
 				if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
 				}
-				let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
+				let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
 				if let Some(msg) = closing_signed {
 					channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 						node_id: counterparty_node_id.clone(),
@@ -4753,7 +4926,7 @@ impl ChannelMana
 					// also implies there are no pending HTLCs left on the channel, so we can
 					// fully delete it from tracking (the channel monitor is still around to
 					// watch for old state broadcasts)!
-					(tx, Some(remove_channel!(self, channel_state, chan_entry)))
+					(tx, Some(remove_channel!(self, chan_entry)))
 				} else { (tx, None) }
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 		}
@@ -4795,7 +4968,7 @@ impl ChannelMana
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
 				}
-				let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+				let create_pending_htlc_status = |chan: &Channel<<K::Target as KeysInterface>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
 					// If the update_add is completely bogus, the call will Err and we will close,
 					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
 					// want to reject the new HTLC and fail it backwards instead of forwarding.
@@ -4803,10 +4976,10 @@ impl ChannelMana
 						PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, ..
}) => { let reason = if (error_code & 0x1000) != 0 { let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data) + HTLCFailReason::reason(real_code, error_data) } else { - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) - }; + HTLCFailReason::from_failure_code(error_code) + }.get_encrypted_failure_packet(incoming_shared_secret, &None); let msg = msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, @@ -4817,7 +4990,7 @@ impl ChannelMana _ => pending_forward_info } }; - try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan); + try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -4833,7 +5006,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) + try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -4850,7 +5023,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); + try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -4867,9 +5040,9 @@ impl ChannelMana } if (msg.failure_code & 0x8000) == 0 { let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); - try_chan_entry!(self, Err(chan_err), channel_state, chan); + try_chan_entry!(self, Err(chan_err), chan); } - try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); + try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan); Ok(()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -4886,17 +5059,17 @@ impl ChannelMana } let (revoke_and_ack, commitment_signed, monitor_update) = match chan.get_mut().commitment_signed(&msg, &self.logger) { - Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), + Err((None, e)) => try_chan_entry!(self, Err(e), chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), 
update); - try_chan_entry!(self, Err(e), channel_state, chan); + try_chan_entry!(self, Err(e), chan); unreachable!(); }, Ok(res) => res }; let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); - if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) { + if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) { return Err(e); } @@ -4924,31 +5097,85 @@ impl ChannelMana } #[inline] - fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)]) { - for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards { + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) { + for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { let mut forward_event = None; + let mut new_intercept_events = Vec::new(); + let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { - let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); - if forward_htlcs.is_empty() { - forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)) - } for (forward_info, prev_htlc_id) in pending_forwards.drain(..) { - match forward_htlcs.entry(match forward_info.routing { - PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, - PendingHTLCRouting::Receive { .. } => 0, - PendingHTLCRouting::ReceiveKeysend { .. } => 0, - }) { + let scid = match forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, + PendingHTLCRouting::Receive { .. } => 0, + PendingHTLCRouting::ReceiveKeysend { .. } => 0, + }; + // Pull this now to avoid introducing a lock order with `forward_htlcs`. 
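The interception branch just below keys its pending map by an `InterceptId` that is simply the SHA-256 of the HTLC's incoming onion shared secret. Restated as a free function, mirroring the `Sha256` usage elsewhere in this file (exact hash-crate version assumed):

    use bitcoin::hashes::{sha256::Hash as Sha256, Hash};

    // One deterministic identifier per incoming HTLC, derived from its
    // onion shared secret; a duplicate id therefore means a duplicate HTLC.
    fn intercept_id(incoming_shared_secret: &[u8; 32]) -> [u8; 32] {
        Sha256::hash(incoming_shared_secret).into_inner()
    }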
+ let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid); + + let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); + let forward_htlcs_empty = forward_htlcs.is_empty(); + match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint, - prev_htlc_id, forward_info }); + entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })); }, hash_map::Entry::Vacant(entry) => { - entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint, - prev_htlc_id, forward_info })); + if !is_our_scid && forward_info.incoming_amt_msat.is_some() && + fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash) + { + let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner()); + let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + new_intercept_events.push(events::Event::HTLCIntercepted { + requested_next_hop_scid: scid, + payment_hash: forward_info.payment_hash, + inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(), + expected_outbound_amount_msat: forward_info.outgoing_amt_msat, + intercept_id + }); + entry.insert(PendingAddHTLCInfo { + prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }); + }, + hash_map::Entry::Occupied(_) => { + log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: forward_info.incoming_shared_secret, + phantom_shared_secret: None, + }); + + failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, + HTLCFailReason::from_failure_code(0x4000 | 10), + HTLCDestination::InvalidForward { requested_forward_scid: scid }, + )); + } + } + } else { + // We don't want to generate a PendingHTLCsForwardable event if only intercepted + // payments are being processed. + if forward_htlcs_empty { + forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); + } + entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }))); + } } } } } + + for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) 
{ + self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); + } + + if !new_intercept_events.is_empty() { + let mut events = self.pending_events.lock().unwrap(); + events.append(&mut new_intercept_events); + } + match forward_event { Some(time) => { let mut pending_events = self.pending_events.lock().unwrap(); @@ -4973,7 +5200,7 @@ impl ChannelMana } let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update(); let raa_updates = break_chan_entry!(self, - chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); + chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update); if was_paused_for_mon_update { @@ -4985,7 +5212,7 @@ impl ChannelMana break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned())); } if update_res != ChannelMonitorUpdateStatus::Completed { - if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, + if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::CommitmentFirst, false, raa_updates.commitment_update.is_some(), false, raa_updates.accepted_htlcs, raa_updates.failed_htlcs, @@ -5003,7 +5230,8 @@ impl ChannelMana raa_updates.finalized_claimed_htlcs, chan.get().get_short_channel_id() .unwrap_or(chan.get().outbound_scid_alias()), - chan.get().get_funding_txo().unwrap())) + chan.get().get_funding_txo().unwrap(), + chan.get().get_user_id())) }, hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -5011,13 +5239,13 @@ impl ChannelMana self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); match res { Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs, - short_channel_id, channel_outpoint)) => + short_channel_id, channel_outpoint, user_channel_id)) => { for failure in pending_failures.drain(..) 
{ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() }; - self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver); + self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver); } - self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]); + self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]); self.finalize_claims(finalized_claim_htlcs); Ok(()) }, @@ -5033,7 +5261,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan); + try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -5055,7 +5283,7 @@ impl ChannelMana channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: try_chan_entry!(self, chan.get_mut().announcement_signatures( - self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan), + self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan), // Note that announcement_signatures fails if the channel cannot be announced, // so get_channel_update_for_broadcast will never fail by the time we get here. update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), @@ -5068,15 +5296,15 @@ impl ChannelMana /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err. 
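`internal_channel_update` below was reworked to resolve the SCID through the new `short_to_chan_info` `RwLock` *before* taking the `channel_state` mutex, keeping the read guard short-lived and avoiding nesting it under the mutex. The pattern in isolation, with simplified types:

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    // Resolve via the read lock, let its guard drop, then take the mutex.
    fn lookup_then_lock(
        short_to_chan_info: &RwLock<HashMap<u64, [u8; 32]>>,
        channels: &Mutex<HashMap<[u8; 32], String>>,
        scid: u64,
    ) -> Option<String> {
        let chan_id = *short_to_chan_info.read().unwrap().get(&scid)?; // read guard ends here
        channels.lock().unwrap().get(&chan_id).cloned()
    }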
fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) { + let chan_id = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) { Some((_cp_id, chan_id)) => chan_id.clone(), None => { // It's not a local channel return Ok(NotifyOption::SkipPersist) } }; + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; match channel_state.by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_counterparty_node_id() != *counterparty_node_id { @@ -5094,17 +5322,17 @@ impl ChannelMana return Ok(NotifyOption::SkipPersist); } else { log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id)); - try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); + try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan); } }, - hash_map::Entry::Vacant(_) => unreachable!() + hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist) } Ok(NotifyOption::DoPersist) } fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let chan_restoration_res; - let (htlcs_failed_forward, need_lnd_workaround) = { + let htlc_forwards; + let need_lnd_workaround = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -5119,7 +5347,7 @@ impl ChannelMana // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish( msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash, - &*self.best_block.read().unwrap()), channel_state, chan); + &*self.best_block.read().unwrap()), chan); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { @@ -5138,19 +5366,21 @@ impl ChannelMana } } let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); - chan_restoration_res = handle_chan_restoration_locked!( - self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order, - responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); + htlc_forwards = self.handle_channel_resumption( + &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order, + Vec::new(), None, responses.channel_ready, responses.announcement_sigs); if let Some(upd) = channel_update { channel_state.pending_msg_events.push(upd); } - (responses.holding_cell_failed_htlcs, need_lnd_workaround) + need_lnd_workaround }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } }; - post_handle_chan_restoration!(self, chan_restoration_res); - self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id); + + if let Some(forwards) = htlc_forwards { + self.forward_htlcs(&mut [forwards][..]); + } if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; @@ -5173,7 +5403,8 @@ impl ChannelMana } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; - self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } }, MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | @@ -5183,7 +5414,7 @@ impl ChannelMana let by_id = &mut channel_state.by_id; let pending_msg_events = &mut channel_state.pending_msg_events; if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) { - let mut chan = remove_channel!(self, channel_state, chan_entry); + let mut chan = remove_channel!(self, chan_entry); failed_channels.push(chan.force_shutdown(false)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { @@ -5229,11 +5460,6 @@ impl ChannelMana /// Check the holding cell in each channel and free any pending HTLCs in them if possible. /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor /// update was applied. - /// - /// This should only apply to HTLCs which were added to the holding cell because we were - /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell - /// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user - /// code to inform them of a channel monitor update. 
fn check_free_holding_cells(&self) -> bool { let mut has_monitor_update = false; let mut failed_htlcs = Vec::new(); @@ -5242,7 +5468,6 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let by_id = &mut channel_state.by_id; - let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; by_id.retain(|channel_id, chan| { @@ -5265,7 +5490,7 @@ impl ChannelMana }, e => { has_monitor_update = true; - let (res, close_channel) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); + let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); handle_errors.push((chan.get_counterparty_node_id(), res)); if close_channel { return false; } }, @@ -5274,7 +5499,7 @@ impl ChannelMana true }, Err(e) => { - let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); + let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); // ChannelClosed event is generated by handle_error for us !close_channel @@ -5305,7 +5530,6 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let by_id = &mut channel_state.by_id; - let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; by_id.retain(|channel_id, chan| { @@ -5330,13 +5554,13 @@ impl ChannelMana log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transaction(&tx); - update_maps_on_chan_removal!(self, short_to_chan_info, chan); + update_maps_on_chan_removal!(self, chan); false } else { true } }, Err(e) => { has_update = true; - let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); + let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); !close_channel } @@ -5411,8 +5635,8 @@ impl ChannelMana /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the /// [`PaymentHash`] and [`PaymentPreimage`] for you. /// - /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentReceived`], which - /// will have the [`PaymentReceived::payment_preimage`] field filled in. That should then be + /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which + /// will have the [`PaymentClaimable::payment_preimage`] field filled in. That should then be /// passed directly to [`claim_funds`]. /// /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements. @@ -5428,8 +5652,8 @@ impl ChannelMana /// Errors if `min_value_msat` is greater than total bitcoin supply. 
/// /// [`claim_funds`]: Self::claim_funds - /// [`PaymentReceived`]: events::Event::PaymentReceived - /// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage + /// [`PaymentClaimable`]: events::Event::PaymentClaimable + /// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> { inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) }
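As a usage sketch of the inbound-payment flow documented above (hedged: `channel_manager` and `event` are assumed to come from the surrounding application, and error handling is elided):

	use lightning::util::events::{Event, PaymentPurpose};

	// Register an invoice-style payment: at least 10_000 msat, valid for 3600 seconds.
	let (payment_hash, payment_secret) = channel_manager
		.create_inbound_payment(Some(10_000), 3600)
		.expect("min_value_msat must not exceed the total bitcoin supply");
	// ...embed payment_hash/payment_secret in an invoice handed to the payer...

	// Later, in the application's event handler, the preimage arrives inside the
	// event's purpose and can be passed straight to claim_funds.
	match event {
		Event::PaymentClaimable { purpose, .. } => {
			if let PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), .. } = purpose {
				channel_manager.claim_funds(preimage);
			}
		},
		_ => {},
	}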
@@ -5455,7 +5679,7 @@ impl ChannelMana /// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is /// stored external to LDK. /// - /// A [`PaymentReceived`] event will only be generated if the [`PaymentSecret`] matches a + /// A [`PaymentClaimable`] event will only be generated if the [`PaymentSecret`] matches a /// payment secret fetched via this method or [`create_inbound_payment`], and which is at least /// the `min_value_msat` provided here, if one is provided. /// @@ -5465,7 +5689,7 @@ impl ChannelMana /// /// `min_value_msat` should be set if the invoice being generated contains a value. Any payment /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat` - /// before a [`PaymentReceived`] event will be generated, ensuring that we do not provide the + /// before a [`PaymentClaimable`] event will be generated, ensuring that we do not provide the /// sender "proof-of-payment" unless they have paid the required amount. /// /// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for /// @@ -5476,9 +5700,9 @@ impl ChannelMana /// /// Note that we use block header time to time-out pending inbound payments (with some margin /// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will - /// accept a payment and generate a [`PaymentReceived`] event for some time after the expiry. + /// accept a payment and generate a [`PaymentClaimable`] event for some time after the expiry. /// If you need exact expiry semantics, you should enforce them upon receipt of - /// [`PaymentReceived`]. + /// [`PaymentClaimable`]. /// /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry` /// set to at least [`MIN_FINAL_CLTV_EXPIRY`]. @@ -5494,7 +5718,7 @@ impl ChannelMana /// Errors if `min_value_msat` is greater than total bitcoin supply. /// /// [`create_inbound_payment`]: Self::create_inbound_payment - /// [`PaymentReceived`]: events::Event::PaymentReceived + /// [`PaymentClaimable`]: events::Event::PaymentClaimable pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, ()> { inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) } @@ -5526,14 +5750,14 @@ impl ChannelMana /// /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager pub fn get_phantom_scid(&self) -> u64 { - let mut channel_state = self.channel_state.lock().unwrap(); - let best_block = self.best_block.read().unwrap(); + let best_block_height = self.best_block.read().unwrap().height(); + let short_to_chan_info = self.short_to_chan_info.read().unwrap(); loop { - let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); // Ensure the generated scid doesn't conflict with a real channel. - match channel_state.short_to_chan_info.entry(scid_candidate) { - hash_map::Entry::Occupied(_) => continue, - hash_map::Entry::Vacant(_) => return scid_candidate + match short_to_chan_info.get(&scid_candidate) { + Some(_) => continue, + None => return scid_candidate } } } @@ -5549,14 +5773,53 @@ impl ChannelMana } } + /// Gets a fake short channel id for use in receiving intercepted payments. These fake scids are + /// used when constructing the route hints for HTLCs intended to be intercepted. See + /// [`ChannelManager::forward_intercepted_htlc`]. + /// + /// Note that this method is not guaranteed to return unique values; you may need to call it a few + /// times to get a unique scid. + pub fn get_intercept_scid(&self) -> u64 { + let best_block_height = self.best_block.read().unwrap().height(); + let short_to_chan_info = self.short_to_chan_info.read().unwrap(); + loop { + let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + // Ensure the generated scid doesn't conflict with a real channel. + if short_to_chan_info.contains_key(&scid_candidate) { continue } + return scid_candidate + } + }
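To illustrate how the fake SCID returned by `get_intercept_scid` is meant to be used, a sketch of building a route hint around it (the intermediary `lsp_node_id` and the fee/CLTV values are hypothetical placeholders; `RouteHint`, `RouteHintHop`, and `RoutingFees` are the router's public types):

	use lightning::routing::gossip::RoutingFees;
	use lightning::routing::router::{RouteHint, RouteHintHop};

	// The intercept scid stands in for the (possibly not-yet-open) channel to us.
	let intercept_scid = channel_manager.get_intercept_scid();
	let hint = RouteHint(vec![RouteHintHop {
		src_node_id: lsp_node_id, // the node that will intercept and forward
		short_channel_id: intercept_scid,
		fees: RoutingFees { base_msat: 1_000, proportional_millionths: 0 },
		cltv_expiry_delta: 144,
		htlc_minimum_msat: None,
		htlc_maximum_msat: None,
	}]);
	// `hint` is then embedded in the invoice handed to the payer; an HTLC arriving for
	// `intercept_scid` surfaces as an interception event (assuming interception is
	// enabled in the intermediary's UserConfig) instead of being forwarded directly.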
+ + /// Gets inflight HTLC information by processing pending outbound payments that are in + /// our channels. May be used during pathfinding to account for in-use channel liquidity. + pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs { + let mut inflight_htlcs = InFlightHtlcs::new(); + + for chan in self.channel_state.lock().unwrap().by_id.values() { + for (htlc_source, _) in chan.inflight_htlc_sources() { + if let HTLCSource::OutboundRoute { path, .. } = htlc_source { + inflight_htlcs.process_path(path, self.get_our_node_id()); + } + } + } + + inflight_htlcs + } + #[cfg(any(test, fuzzing, feature = "_test_utils"))] pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> { let events = core::cell::RefCell::new(Vec::new()); - let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone()); + let event_handler = |event: events::Event| events.borrow_mut().push(event); self.process_pending_events(&event_handler); events.into_inner() } + #[cfg(test)] + pub fn pop_pending_event(&self) -> Option<events::Event> { + let mut events = self.pending_events.lock().unwrap(); + if events.is_empty() { None } else { Some(events.remove(0)) } + } + #[cfg(test)] pub fn has_pending_payments(&self) -> bool { !self.pending_outbound_payments.lock().unwrap().is_empty() } @@ -5566,12 +5829,45 @@ impl ChannelMana pub fn clear_pending_payments(&self) { self.pending_outbound_payments.lock().unwrap().clear() } + + /// Processes any events asynchronously in the order they were generated since the last call + /// using the given event handler. + /// + /// See the trait-level documentation of [`EventsProvider`] for requirements. + pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>( + &self, handler: H + ) { + // We'll acquire our total consistency lock until the returned future completes so that + // we can be sure no other persists happen while processing events. + let _read_guard = self.total_consistency_lock.read().unwrap(); + + let mut result = NotifyOption::SkipPersist; + + // TODO: This behavior should be documented. It's unintuitive that we query + // ChannelMonitors when clearing other events. + if self.process_pending_monitor_events() { + result = NotifyOption::DoPersist; + } + + let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]); + if !pending_events.is_empty() { + result = NotifyOption::DoPersist; + } + + for event in pending_events { + handler(event).await; + } + + if result == NotifyOption::DoPersist { + self.persistence_notifier.notify(); + } + } }
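A minimal sketch of driving the async variant above, assuming a tokio-style executor owns this task and `channel_manager` is reachable from it (the handler body is illustrative only):

	// Inside an async task: drain pending events without blocking the executor.
	channel_manager.process_pending_events_async(|event| async move {
		match event {
			Event::PaymentClaimable { payment_hash, .. } => {
				// Await async work (database lookups, RPCs, ...) before acting.
				println!("claimable payment {:?}", payment_hash);
			},
			_ => {},
		}
	}).await;
	// As with the synchronous EventsProvider path, persist the ChannelManager
	// afterwards if the persistence notifier was signaled.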
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L> - where M::Target: chain::Watch<Signer>, +impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, L> + where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -5607,11 +5903,11 @@ impl MessageSend } } -impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L> +impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<M, T, K, F, L> where - M::Target: chain::Watch<Signer>, + M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -5629,13 +5925,13 @@ where result = NotifyOption::DoPersist; } - let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]); + let pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]); if !pending_events.is_empty() { result = NotifyOption::DoPersist; } - for event in pending_events.drain(..) { - handler.handle_event(&event); + for event in pending_events { + handler.handle_event(event); } result @@ -5643,11 +5939,11 @@ where } } -impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<Signer, M, T, K, F, L> +impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<M, T, K, F, L> where - M::Target: chain::Watch<Signer>, + M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -5680,11 +5976,11 @@ where } } -impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<Signer, M, T, K, F, L> +impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, K, F, L> where - M::Target: chain::Watch<Signer>, + M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -5742,29 +6038,14 @@ where payment_secrets.retain(|_, inbound_payment| { inbound_payment.expiry_time > header.time as u64 }); - - let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - let mut pending_events = self.pending_events.lock().unwrap(); - outbounds.retain(|payment_id, payment| { - if payment.remaining_parts() != 0 { return true } - if let PendingOutboundPayment::Retryable { starting_block_height, payment_hash, .. } = payment { - if *starting_block_height + PAYMENT_EXPIRY_BLOCKS <= height { - log_info!(self.logger, "Timing out payment with id {} and hash {}", log_bytes!(payment_id.0), log_bytes!(payment_hash.0)); - pending_events.push(events::Event::PaymentFailed { - payment_id: *payment_id, payment_hash: *payment_hash, - }); - false - } else { true } - } else { true } - }); } - fn get_relevant_txids(&self) -> Vec<Txid> { + fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> { let channel_state = self.channel_state.lock().unwrap(); - let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len()); + let mut res = Vec::with_capacity(channel_state.by_id.len()); for chan in channel_state.by_id.values() { - if let Some(funding_txo) = chan.get_funding_txo() { - res.push(funding_txo.txid); + if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) { + res.push((funding_txo.txid, block_hash)); } } res @@ -5782,18 +6063,18 @@ where } } -impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L> +impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L> where - M::Target: chain::Watch<Signer>, + M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. - fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>> + fn do_chain_event<FN: Fn(&mut Channel<<K::Target as KeysInterface>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>> (&self, height_opt: Option<u32>, f: FN) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // during initialization prior to the chain_monitor being fully configured in some cases. @@ -5804,19 +6085,17 @@ { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; channel_state.by_id.retain(|_, channel| { let res = f(channel); if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { for (source, payment_hash) in timed_out_pending_htlcs.drain(..)
{ let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); - timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { - failure_code, data, - }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); + timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data), + HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); } if let Some(channel_ready) = channel_ready_opt { - send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready); + send_channel_ready!(self, pending_msg_events, channel, channel_ready); if channel.is_usable() { log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { @@ -5829,6 +6108,9 @@ where log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); } } + + emit_channel_ready_event!(self, channel); + if let Some(announcement_sigs) = announcement_sigs { log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { @@ -5854,6 +6136,7 @@ where // enforce option_scid_alias then), and if the funding tx is ever // un-confirmed we force-close the channel, ensuring short_to_chan_info // is always consistent. + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", @@ -5861,7 +6144,7 @@ where } } } else if let Err(reason) = res { - update_maps_on_chan_removal!(self, short_to_chan_info, channel); + update_maps_on_chan_removal!(self, channel); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. failed_channels.push(channel.force_shutdown(true)); @@ -5883,34 +6166,56 @@ where } true }); + } - if let Some(height) = height_opt { - channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { - htlcs.retain(|htlc| { - // If height is approaching the number of blocks we think it takes us to get - // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, - // just give up on it and fail the HTLC. - if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { - let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); - htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height)); - - timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason { - failure_code: 0x4000 | 15, - data: htlc_msat_height_data - }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); - false - } else { true } - }); - !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. 
+ if let Some(height) = height_opt { + self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| { + htlcs.retain(|htlc| { + // If height is approaching the number of blocks we think it takes us to get + // our commitment transaction confirmed before the HTLC expires, plus the + // number of blocks we generally consider it to take to do a commitment update, + // just give up on it and fail the HTLC. + if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); + htlc_msat_height_data.extend_from_slice(&height.to_be_bytes()); + + timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), + HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), + HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); + false + } else { true } }); - } + !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. + }); + + let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + intercepted_htlcs.retain(|_, htlc| { + if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER { + let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: htlc.prev_short_channel_id, + htlc_id: htlc.prev_htlc_id, + incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, + phantom_shared_secret: None, + outpoint: htlc.prev_funding_outpoint, + }); + + let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, + _ => unreachable!(), + }; + timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, + HTLCFailReason::from_failure_code(0x2000 | 2), + HTLCDestination::InvalidForward { requested_forward_scid })); + log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); + false + } else { true } + }); } self.handle_init_event_channel_failures(failed_channels); for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { - self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination); + self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination); } }
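For context on how these intercepted entries are resolved before the CLTV-based timeout above fires, a hedged sketch of the intended flow (the `Event::HTLCIntercepted` fields and the `forward_intercepted_htlc`/`fail_intercepted_htlc` calls follow this release's intercept API as introduced in this patch; `channel_for_intercept_scid` is hypothetical application logic):

	match event {
		Event::HTLCIntercepted {
			intercept_id, requested_next_hop_scid, expected_outbound_amount_msat, ..
		} => {
			if let Some((next_hop_channel_id, next_node_id)) = channel_for_intercept_scid(requested_next_hop_scid) {
				channel_manager.forward_intercepted_htlc(
					intercept_id, &next_hop_channel_id, next_node_id, expected_outbound_amount_msat
				).expect("intercept forward failed");
			} else {
				// Fail explicitly instead of waiting for the height-based timeout above.
				channel_manager.fail_intercepted_htlc(intercept_id).expect("unknown intercept_id");
			}
		},
		_ => {},
	}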
@@ -5958,11 +6263,11 @@ where } } -impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> - ChannelMessageHandler for ChannelManager<Signer, M, T, K, F, L> - where M::Target: chain::Watch<Signer>, +impl<M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> + ChannelMessageHandler for ChannelManager<M, T, K, F, L> + where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -6064,14 +6369,13 @@ impl let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_chan_info = &mut channel_state.short_to_chan_info; log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.", log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); channel_state.by_id.retain(|_, chan| { if chan.get_counterparty_node_id() == *counterparty_node_id { chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); if chan.is_shutdown() { - update_maps_on_chan_removal!(self, short_to_chan_info, chan); + update_maps_on_chan_removal!(self, chan); self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); return false; } else { @@ -6276,33 +6580,111 @@ impl_writeable_tlv_based!(ChannelCounterparty, { (11, outbound_htlc_maximum_msat, option), }); -impl_writeable_tlv_based!(ChannelDetails, { - (1, inbound_scid_alias, option), - (2, channel_id, required), - (3, channel_type, option), - (4, counterparty, required), - (5, outbound_scid_alias, option), - (6, funding_txo, option), - (7, config, option), - (8, short_channel_id, option), - (10, channel_value_satoshis, required), - (12, unspendable_punishment_reserve, option), - (14, user_channel_id, required), - (16, balance_msat, required), - (18, outbound_capacity_msat, required), - // Note that by the time we get past the required read above, outbound_capacity_msat will be - // filled in, so we can safely unwrap it here. - (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), - (20, inbound_capacity_msat, required), - (22, confirmations_required, option), - (24, force_close_spend_delay, option), - (26, is_outbound, required), - (28, is_channel_ready, required), - (30, is_usable, required), - (32, is_public, required), - (33, inbound_htlc_minimum_msat, option), - (35, inbound_htlc_maximum_msat, option), -}); +impl Writeable for ChannelDetails { + fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { + // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with + // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. + let user_channel_id_low = self.user_channel_id as u64; + let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64); + write_tlv_fields!(writer, { + (1, self.inbound_scid_alias, option), + (2, self.channel_id, required), + (3, self.channel_type, option), + (4, self.counterparty, required), + (5, self.outbound_scid_alias, option), + (6, self.funding_txo, option), + (7, self.config, option), + (8, self.short_channel_id, option), + (9, self.confirmations, option), + (10, self.channel_value_satoshis, required), + (12, self.unspendable_punishment_reserve, option), + (14, user_channel_id_low, required), + (16, self.balance_msat, required), + (18, self.outbound_capacity_msat, required), + // Note that by the time we get past the required read above, outbound_capacity_msat will be + // filled in, so we can safely unwrap it here. + (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), + (20, self.inbound_capacity_msat, required), + (22, self.confirmations_required, option), + (24, self.force_close_spend_delay, option), + (26, self.is_outbound, required), + (28, self.is_channel_ready, required), + (30, self.is_usable, required), + (32, self.is_public, required), + (33, self.inbound_htlc_minimum_msat, option), + (35, self.inbound_htlc_maximum_msat, option), + (37, user_channel_id_high_opt, option), + }); + Ok(()) + } +}
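The low/high split used above round-trips as follows (a standalone sketch, not LDK code): older readers that only understand the required type-14 field see the low 64 bits and implicitly treat the odd, optional type-37 high half as zero.

	let user_channel_id: u128 = (42u128 << 64) | 7;
	let low = user_channel_id as u64;           // TLV type 14, required
	let high = (user_channel_id >> 64) as u64;  // TLV type 37, optional

	let read_back = (low as u128) | ((high as u128) << 64);
	assert_eq!(read_back, user_channel_id);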
+ +impl Readable for ChannelDetails { + fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> { + init_and_read_tlv_fields!(reader, { + (1, inbound_scid_alias, option), + (2, channel_id, required), + (3, channel_type, option), + (4, counterparty, required), + (5, outbound_scid_alias, option), + (6, funding_txo, option), + (7, config, option), + (8, short_channel_id, option), + (9, confirmations, option), + (10, channel_value_satoshis, required), + (12, unspendable_punishment_reserve, option), + (14, user_channel_id_low, required), + (16, balance_msat, required), + (18, outbound_capacity_msat, required), + // Note that by the time we get past the required read above, outbound_capacity_msat will be + // filled in, so we can safely unwrap it here. + (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), + (20, inbound_capacity_msat, required), + (22, confirmations_required, option), + (24, force_close_spend_delay, option), + (26, is_outbound, required), + (28, is_channel_ready, required), + (30, is_usable, required), + (32, is_public, required), + (33, inbound_htlc_minimum_msat, option), + (35, inbound_htlc_maximum_msat, option), + (37, user_channel_id_high_opt, option), + }); + + // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with + // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
+ let user_channel_id_low: u64 = user_channel_id_low.0.unwrap(); + let user_channel_id = user_channel_id_low as u128 + + ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64); + + Ok(Self { + inbound_scid_alias, + channel_id: channel_id.0.unwrap(), + channel_type, + counterparty: counterparty.0.unwrap(), + outbound_scid_alias, + funding_txo, + config, + short_channel_id, + channel_value_satoshis: channel_value_satoshis.0.unwrap(), + unspendable_punishment_reserve, + user_channel_id, + balance_msat: balance_msat.0.unwrap(), + outbound_capacity_msat: outbound_capacity_msat.0.unwrap(), + next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(), + inbound_capacity_msat: inbound_capacity_msat.0.unwrap(), + confirmations_required, + confirmations, + force_close_spend_delay, + is_outbound: is_outbound.0.unwrap(), + is_channel_ready: is_channel_ready.0.unwrap(), + is_usable: is_usable.0.unwrap(), + is_public: is_public.0.unwrap(), + inbound_htlc_minimum_msat, + inbound_htlc_maximum_msat, + }) + } +} impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, vec_type), @@ -6330,8 +6712,9 @@ impl_writeable_tlv_based!(PendingHTLCInfo, { (0, routing, required), (2, incoming_shared_secret, required), (4, payment_hash, required), - (6, amt_to_forward, required), - (8, outgoing_cltv_value, required) + (6, outgoing_amt_msat, required), + (8, outgoing_cltv_value, required), + (9, incoming_amt_msat, option), }); @@ -6530,7 +6913,7 @@ impl Writeable for HTLCSource { (1, payment_id_opt, option), (2, first_hop_htlc_msat, required), (3, payment_secret, option), - (4, path, vec_type), + (4, *path, vec_type), (5, payment_params, option), }); } @@ -6543,28 +6926,21 @@ impl Writeable for HTLCSource { } } -impl_writeable_tlv_based_enum!(HTLCFailReason, - (0, LightningError) => { - (0, err, required), - }, - (1, Reason) => { - (0, failure_code, required), - (2, data, vec_type), - }, -;); +impl_writeable_tlv_based!(PendingAddHTLCInfo, { + (0, forward_info, required), + (1, prev_user_channel_id, (default_value, 0)), + (2, prev_short_channel_id, required), + (4, prev_htlc_id, required), + (6, prev_funding_outpoint, required), +}); impl_writeable_tlv_based_enum!(HTLCForwardInfo, - (0, AddHTLC) => { - (0, forward_info, required), - (2, prev_short_channel_id, required), - (4, prev_htlc_id, required), - (6, prev_funding_outpoint, required), - }, (1, FailHTLC) => { (0, htlc_id, required), (2, err_packet, required), - }, -;); + }; + (0, AddHTLC) +); impl_writeable_tlv_based!(PendingInboundPayment, { (0, payment_secret, required), @@ -6581,6 +6957,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (1, Fulfilled) => { (0, session_privs, required), (1, payment_hash, option), + (3, timer_ticks_without_htlcs, (default_value, 0)), }, (2, Retryable) => { (0, session_privs, required), @@ -6597,10 +6974,10 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, }, ); -impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L> - where M::Target: chain::Watch<Signer>, +impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<M, T, K, F, L> + where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -6646,10 +7023,13 @@ impl Writeable f } } - let channel_state = self.channel_state.lock().unwrap(); + let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); + let claimable_payments = self.claimable_payments.lock().unwrap(); + let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); + let
mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); - (channel_state.claimable_htlcs.len() as u64).write(writer)?; - for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { + (claimable_payments.claimable_htlcs.len() as u64).write(writer)?; + for (payment_hash, (purpose, previous_hops)) in claimable_payments.claimable_htlcs.iter() { payment_hash.write(writer)?; (previous_hops.len() as u64).write(writer)?; for htlc in previous_hops.iter() { @@ -6666,8 +7046,6 @@ impl Writeable f peer_state.latest_features.write(writer)?; } - let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); - let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); let events = self.pending_events.lock().unwrap(); (events.len() as u64).write(writer)?; for event in events.iter() { @@ -6730,9 +7108,27 @@ impl Writeable f _ => {}, } } + + let mut pending_intercepted_htlcs = None; + let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + if our_pending_intercepts.len() != 0 { + pending_intercepted_htlcs = Some(our_pending_intercepts); + } + + let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments); + if pending_claiming_payments.as_ref().unwrap().is_empty() { + // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments + // map. Thus, if there are no entries we skip writing a TLV for it. + pending_claiming_payments = None; + } else { + debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR"); + } + write_tlv_fields!(writer, { (1, pending_outbound_payments_no_retry, required), + (2, pending_intercepted_htlcs, option), (3, pending_outbound_payments, required), + (4, pending_claiming_payments, option), (5, self.our_network_pubkey, required), (7, self.fake_scid_rand_bytes, required), (9, htlc_purposes, vec_type),
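A minimal sketch of persisting the serialized form produced by this `Writeable` implementation, assuming a filesystem store (the file names and the temp-file-then-rename strategy are illustrative; `Writeable::encode` is the trait's provided helper that collects `write()` output into a `Vec<u8>`):

	use lightning::util::ser::Writeable;

	// After handling events (or when the persistence notifier fires), write the
	// whole ChannelManager out.
	let bytes = channel_manager.encode();
	std::fs::write("manager.bin.tmp", &bytes).expect("write failed");
	std::fs::rename("manager.bin.tmp", "manager.bin").expect("rename failed");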
@@ -6775,10 +7171,10 @@ impl Writeable f /// which you've already broadcasted the transaction. /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor -pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> - where M::Target: chain::Watch<Signer>, +pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> + where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -6821,14 +7217,14 @@ pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: /// this struct. /// /// (C-not exported) because we have no HashMap bindings - pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<Signer>>, + pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>, } -impl<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> - ChannelManagerReadArgs<'a, Signer, M, T, K, F, L> - where M::Target: chain::Watch<Signer>, +impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> + ChannelManagerReadArgs<'a, M, T, K, F, L> + where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -6836,7 +7232,7 @@ impl<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> /// HashMap for you. This is primarily useful for C bindings where it is not practical to /// populate a HashMap directly from C. pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig, - mut channel_monitors: Vec<&'a mut ChannelMonitor<Signer>>) -> Self { + mut channel_monitors: Vec<&'a mut ChannelMonitor<<K::Target as KeysInterface>::Signer>>) -> Self { Self { keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config, channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect() @@ -6846,29 +7242,29 @@ impl<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the // SimpleArcChannelManager type: -impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> - ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<Signer, M, T, K, F, L>>) - where M::Target: chain::Watch<Signer>, +impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> + ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, L>>) + where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { - fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> { - let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?; + fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> { + let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, K, F, L>)>::read(reader, args)?; Ok((blockhash, Arc::new(chan_manager))) } }
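And the matching read-back through these `ReadableArgs` impls (a sketch: `load_channel_monitors` and the constructor arguments are assumed to exist in the application; the file name matches the persistence sketch earlier):

	use bitcoin::BlockHash;
	use lightning::util::ser::ReadableArgs;

	let mut monitors = load_channel_monitors(); // hypothetical: Vec<ChannelMonitor<_>>
	let read_args = ChannelManagerReadArgs::new(
		&keys_manager, &fee_estimator, &chain_monitor, &tx_broadcaster, &logger,
		UserConfig::default(), monitors.iter_mut().collect(),
	);
	let mut reader = std::io::Cursor::new(std::fs::read("manager.bin").expect("read failed"));
	let (last_block_hash, channel_manager) =
		<(BlockHash, ChannelManager<_, _, _, _, _>)>::read(&mut reader, read_args)
			.expect("ChannelManager bytes were corrupt or newer than this LDK");
	// Replay chain data since `last_block_hash` before using `channel_manager`.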
-impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> - ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<Signer, M, T, K, F, L>) - where M::Target: chain::Watch<Signer>, +impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> + ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, L>> for (BlockHash, ChannelManager<M, T, K, F, L>) + where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>, T::Target: BroadcasterInterface, - K::Target: KeysInterface<Signer = Signer>, + K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { - fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> { + fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, M, T, K, F, L>) -> Result<Self, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); let genesis_hash: BlockHash = Readable::read(reader)?; @@ -6884,7 +7280,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = Vec::new(); for _ in 0..channel_count { - let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?; + let mut channel: Channel<<K::Target as KeysInterface>::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?; let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { @@ -6918,6 +7314,25 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> user_channel_id: channel.get_user_id(), reason: ClosureReason::OutdatedChannelManager }); + for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { + let mut found_htlc = false; + for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { + if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; } + } + if !found_htlc { + // If we have some HTLCs in the channel which are not present in the newer + // ChannelMonitor, they have been removed and should be failed back to + // ensure we don't forget them entirely. Note that if the missing HTLC(s) + // were actually claimed we'd have generated and ensured the previous-hop + // claim update ChannelMonitor updates were persisted prior to persisting + // the ChannelMonitor update for the forward leg, so attempting to fail the + // backwards leg of the HTLC will simply be rejected. + log_info!(args.logger, + "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", + log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0)); + failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id())); + } + } } else { log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id())); if let Some(short_channel_id) = channel.get_short_channel_id() { @@ -6998,16 +7413,6 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> None => continue, } } - if forward_htlcs_count > 0 { - // If we have pending HTLCs to forward, assume we either dropped a - // `PendingHTLCsForwardable` or the user received it but never processed it as they - // shut down before the timer hit. Either way, set the time_forwardable to a small - // constant as enough time has likely passed that we should simply handle the forwards - // now, or at least after the user gets a chance to reconnect to our peers. - pending_events_read.push(events::Event::PendingHTLCsForwardable { - time_forwardable: Duration::from_secs(2), - }); - } let background_event_count: u64 = Readable::read(reader)?; let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>())); @@ -7045,13 +7450,17 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None; let mut pending_outbound_payments = None; + let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new()); let mut received_network_pubkey: Option<PublicKey> = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; + let mut pending_claiming_payments = Some(HashMap::new()); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), + (2, pending_intercepted_htlcs, option), (3, pending_outbound_payments, option), + (4, pending_claiming_payments, option), (5, received_network_pubkey, option), (7, fake_scid_rand_bytes, option), (9, claimable_htlc_purposes, vec_type), @@ -7116,10 +7525,58 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } } + for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() { + if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint && + info.prev_htlc_id == prev_hop_data.htlc_id + }; + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs` or + // `pending_intercepted_htlcs`, we were apparently not persisted after + // the monitor was when forwarding the payment.
+ forward_htlcs.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + false + } else { true } + } else { true } + }); + !forwards.is_empty() + }); + pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + pending_events_read.retain(|event| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); + } + } } } } + if !forward_htlcs.is_empty() { + // If we have pending HTLCs to forward, assume we either dropped a + // `PendingHTLCsForwardable` or the user received it but never processed it as they + // shut down before the timer hit. Either way, set the time_forwardable to a small + // constant as enough time has likely passed that we should simply handle the forwards + // now, or at least after the user gets a chance to reconnect to our peers. + pending_events_read.push(events::Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_secs(2), + }); + } + let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); @@ -7215,6 +7672,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) { log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0)); let mut claimable_amt_msat = 0; + let mut receiver_node_id = Some(our_network_pubkey); + let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret; + if phantom_shared_secret.is_some() { + let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode) + .expect("Failed to get node_id for phantom node recipient"); + receiver_node_id = Some(phantom_pubkey) + } for claimable_htlc in claimable_htlcs { claimable_amt_msat += claimable_htlc.value; @@ -7242,6 +7706,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } pending_events_read.push(events::Event::PaymentClaimed { + receiver_node_id, payment_hash, purpose: payment_purpose, amount_msat: claimable_amt_msat, @@ -7260,17 +7725,18 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel_state: Mutex::new(ChannelHolder { by_id, - short_to_chan_info, - claimable_htlcs, pending_msg_events: Vec::new(), }), inbound_payment_key: expanded_inbound_key, pending_inbound_payments: Mutex::new(pending_inbound_payments), pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), forward_htlcs: Mutex::new(forward_htlcs), + claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs, pending_claiming_payments: pending_claiming_payments.unwrap() }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), id_to_peer: 
Mutex::new(id_to_peer), + short_to_chan_info: FairRwLock::new(short_to_chan_info), fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(), probing_cookie_secret: probing_cookie_secret.unwrap(), @@ -7296,7 +7762,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; - channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -7413,18 +7880,22 @@ mod tests { // First, send a partial MPP payment. let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000); + let mut mpp_route = route.clone(); + mpp_route.paths.push(mpp_route.paths[0].clone()); + let payment_id = PaymentId([42; 32]); // Use the utility function send_payment_along_path to send the payment with MPP data which // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. - nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap(); + let session_privs = nodes[0].node.add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap(); + nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); // Next, send a keysend payment with the same payment_hash and make sure it fails. - nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7447,7 +7918,7 @@ mod tests { expect_payment_failed!(nodes[0], our_payment_hash, true); // Send the second half of the original MPP payment. 
- nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap(); + nodes[0].node.send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7545,7 +8016,7 @@ mod tests { &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes ).unwrap(); - nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7578,7 +8049,7 @@ mod tests { &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes ).unwrap(); - let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7588,7 +8059,7 @@ mod tests { // Next, attempt a regular payment and make sure it fails. let payment_secret = PaymentSecret([43; 32]); - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7631,7 +8102,7 @@ mod tests { let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features()); let route_params = RouteParameters { payment_params: PaymentParameters::for_keysend(payee_pubkey), - final_value_msat: 10000, + final_value_msat: 10_000, final_cltv_expiry_delta: 40, }; let network_graph = nodes[0].network_graph; @@ -7645,7 +8116,8 @@ mod tests { let test_preimage = PaymentPreimage([42; 32]); let mismatch_payment_hash = PaymentHash([43; 32]); - let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap(); + let session_privs = nodes[0].node.add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap(); + nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -7675,7 +8147,7 @@ mod tests { let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features()); let route_params = RouteParameters { payment_params: PaymentParameters::for_keysend(payee_pubkey), - final_value_msat: 10000, + final_value_msat: 10_000, final_cltv_expiry_delta: 40, }; let network_graph = nodes[0].network_graph; @@ -7690,7 +8162,8 @@ mod tests { let 
test_preimage = PaymentPreimage([42; 32]); let test_secret = PaymentSecret([43; 32]); let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner()); - let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap(); + let session_privs = nodes[0].node.add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap(); + nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -7727,7 +8200,7 @@ mod tests { route.paths[1][0].short_channel_id = chan_2_id; route.paths[1][1].short_channel_id = chan_4_id; - match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() { + match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() { PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) }, _ => panic!("unexpected error") @@ -7882,7 +8355,7 @@ pub mod bench { use crate::chain::Listen; use crate::chain::chainmonitor::{ChainMonitor, Persist}; use crate::chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner}; - use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage}; + use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{ChannelMessageHandler, Init}; use crate::routing::gossip::NetworkGraph; @@ -7900,12 +8373,12 @@ pub mod bench { use test::Bencher; struct NodeHolder<'a, P: Persist<InMemorySigner>> { - node: &'a ChannelManager<InMemorySigner, - &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource, - &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator, - &'a test_utils::TestLogger, &'a P>, - &'a test_utils::TestBroadcaster, &'a KeysManager, - &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger> + node: &'a ChannelManager< + &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource, + &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator, + &'a test_utils::TestLogger, &'a P>, + &'a test_utils::TestBroadcaster, &'a KeysManager, + &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>, } #[cfg(test)] @@ -7988,6 +8461,24 @@ pub mod bench { _ => panic!(), } + let events_a = node_a.get_and_clear_pending_events(); + assert_eq!(events_a.len(), 1); + match events_a[0] { + Event::ChannelReady{ ref counterparty_node_id, .. } => { + assert_eq!(*counterparty_node_id, node_b.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } + + let events_b = node_b.get_and_clear_pending_events(); + assert_eq!(events_b.len(), 1); + match events_b[0] { + Event::ChannelReady{ ref counterparty_node_id, ..
} => { + assert_eq!(*counterparty_node_id, node_a.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } + let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a); let mut payment_count: u64 = 0; @@ -8009,7 +8500,7 @@ pub mod bench { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()); let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap(); - $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + $node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap()); $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]); $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg); @@ -8019,7 +8510,7 @@ pub mod bench { $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id())); expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b }); - expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000); + expect_payment_claimable!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000); $node_b.claim_funds(payment_preimage); expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);