X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=b8372ad9b10302738882fcc7a35665135cee8e73;hb=e38ab09c3a9514768a9833b2636b2b969f62b3e1;hp=121c1d4c66c36cb64f21df76823f5d24ace0e97a;hpb=8d8ee55463612dc5e1661657d7eb2b1a1e8271cc;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 121c1d4c..b8372ad9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -46,15 +46,16 @@ use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfi use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; -use crate::routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters}; +use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, RouteParameters}; use crate::ln::msgs; use crate::ln::onion_utils; +use crate::ln::onion_utils::HTLCFailReason; use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT}; use crate::ln::wire::Encode; use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient}; use crate::util::config::{UserConfig, ChannelConfig}; use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; -use crate::util::{byte_utils, events}; +use crate::util::events; use crate::util::wakers::{Future, Notifier}; use crate::util::scid_utils::fake_scid; use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter}; @@ -92,8 +93,8 @@ use core::ops::Deref; pub(super) enum PendingHTLCRouting { Forward { onion_packet: msgs::OnionPacket, - /// The SCID from the onion that we should forward to. This could be a "real" SCID, an - /// outbound SCID alias, or a phantom node SCID. + /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one + /// generated using `get_fake_scid` from the scid_utils::fake_scid module. short_channel_id: u64, // This should be NonZero eventually when we bump MSRV }, Receive { @@ -142,6 +143,7 @@ pub(super) struct PendingAddHTLCInfo { prev_short_channel_id: u64, prev_htlc_id: u64, prev_funding_outpoint: OutPoint, + prev_user_channel_id: u128, } pub(super) enum HTLCForwardInfo { @@ -206,6 +208,24 @@ impl Readable for PaymentId { Ok(PaymentId(buf)) } } + +/// An identifier used to uniquely identify an intercepted HTLC to LDK. 
+/// (C-not exported) as we just use [u8; 32] directly +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct InterceptId(pub [u8; 32]); + +impl Writeable for InterceptId { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + self.0.write(w) + } +} + +impl Readable for InterceptId { + fn read(r: &mut R) -> Result { + let buf: [u8; 32] = Readable::read(r)?; + Ok(InterceptId(buf)) + } +} /// Tracks the inbound corresponding to an outbound HTLC #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash #[derive(Clone, PartialEq, Eq)] @@ -257,31 +277,12 @@ impl HTLCSource { } } -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug -pub(super) enum HTLCFailReason { - LightningError { - err: msgs::OnionErrorPacket, - }, - Reason { - failure_code: u16, - data: Vec, - } -} - struct ReceiveError { err_code: u16, err_data: Vec, msg: &'static str, } -/// Return value for claim_funds_from_hop -enum ClaimFundsFromHop { - PrevHopForceClosed, - MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option), - Success(u64), - DuplicateClaim, -} - type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>); /// Error type returned across the channel_state mutex boundary. When an Err is generated for a @@ -395,16 +396,39 @@ pub(super) enum RAACommitmentOrder { RevokeAndACKFirst, } -// Note this is only exposed in cfg(test): -pub(super) struct ChannelHolder { - pub(super) by_id: HashMap<[u8; 32], Channel>, +/// Information about a payment which is currently being claimed. +struct ClaimingPayment { + amount_msat: u64, + payment_purpose: events::PaymentPurpose, + receiver_node_id: PublicKey, +} +impl_writeable_tlv_based!(ClaimingPayment, { + (0, amount_msat, required), + (2, payment_purpose, required), + (4, receiver_node_id, required), +}); + +/// Information about claimable or being-claimed payments +struct ClaimablePayments { /// Map from payment hash to the payment data and any HTLCs which are to us and can be /// failed/claimed by the user. /// - /// Note that while this is held in the same mutex as the channels themselves, no consistency - /// guarantees are made about the channels given here actually existing anymore by the time you - /// go to read them! + /// Note that, no consistency guarantees are made about the channels given here actually + /// existing anymore by the time you go to read them! + /// + /// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure + /// we don't get a duplicate payment. claimable_htlcs: HashMap)>, + + /// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which + /// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user + /// as an [`events::Event::PaymentClaimed`]. + pending_claiming_payments: HashMap, +} + +// Note this is only exposed in cfg(test): +pub(super) struct ChannelHolder { + pub(super) by_id: HashMap<[u8; 32], Channel>, /// Messages to send to peers - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). 
pub(super) pending_msg_events: Vec, @@ -419,6 +443,16 @@ enum BackgroundEvent { ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)), } +pub(crate) enum MonitorUpdateCompletionAction { + /// Indicates that a payment ultimately destined for us was claimed and we should emit an + /// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for + /// this payment. Note that this is only best-effort. On restart it's possible such a duplicate + /// event can be generated. + PaymentClaimed { payment_hash: PaymentHash }, + /// Indicates an [`events::Event`] should be surfaced to the user. + EmitEvent { event: events::Event }, +} + /// State we hold per-peer. In the future we should put channels in here, but for now we only hold /// the latest Init features we heard from the peer. struct PeerState { @@ -672,20 +706,24 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage // `total_consistency_lock` // | // |__`forward_htlcs` -// | -// |__`channel_state` // | | -// | |__`id_to_peer` +// | |__`pending_intercepted_htlcs` +// | +// |__`pending_inbound_payments` // | | -// | |__`short_to_chan_info` +// | |__`claimable_payments` // | | -// | |__`per_peer_state` -// | | -// | |__`outbound_scid_aliases` +// | |__`pending_outbound_payments` // | | -// | |__`pending_inbound_payments` +// | |__`channel_state` +// | | +// | |__`id_to_peer` +// | | +// | |__`short_to_chan_info` // | | -// | |__`pending_outbound_payments` +// | |__`per_peer_state` +// | | +// | |__`outbound_scid_aliases` // | | // | |__`best_block` // | | @@ -720,9 +758,9 @@ pub struct ChannelManager channel_state: Mutex::Signer>>, /// Storage for PaymentSecrets and any requirements on future inbound payments before we will - /// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements + /// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed - /// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out. + /// after we generate a PaymentClaimable upon receipt of all MPP parts or when they time out. /// /// See `ChannelManager` struct-level documentation for lock order requirements. pending_inbound_payments: Mutex>, @@ -755,6 +793,17 @@ pub struct ChannelManager pub(super) forward_htlcs: Mutex>>, #[cfg(not(test))] forward_htlcs: Mutex>>, + /// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here + /// until the user tells us what we should do with them. + /// + /// See `ChannelManager` struct-level documentation for lock order requirements. + pending_intercepted_htlcs: Mutex>, + + /// The sets of payments which are claimable or currently being claimed. See + /// [`ClaimablePayments`]' individual field docs for more info. + /// + /// See `ChannelManager` struct-level documentation for lock order requirements. + claimable_payments: Mutex, /// The set of outbound SCID aliases across all our channels, including unconfirmed channels /// and some closed channels which reached a usable state prior to being closed. This is used @@ -1087,7 +1136,8 @@ pub struct ChannelDetails { /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat pub unspendable_punishment_reserve: Option, /// The `user_channel_id` passed in to create_channel, or a random value if the channel was - /// inbound. + /// inbound. 
This may be zero for inbound channels serialized with LDK versions prior to + /// 0.0.113. pub user_channel_id: u128, /// Our total balance. This is the amount we would get if we close the channel. /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this @@ -1140,6 +1190,10 @@ pub struct ChannelDetails { /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth pub confirmations_required: Option, + /// The current number of confirmations on the funding transaction. + /// + /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113. + pub confirmations: Option, /// The number of blocks (after our commitment transaction confirms) that we will need to wait /// until we can claim our funds after we force-close the channel. During this time our /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty @@ -1204,24 +1258,40 @@ impl ChannelDetails { #[derive(Clone, Debug)] pub enum PaymentSendFailure { /// A parameter which was passed to send_payment was invalid, preventing us from attempting to - /// send the payment at all. No channel state has been changed or messages sent to peers, and - /// once you've changed the parameter at error, you can freely retry the payment in full. + /// send the payment at all. + /// + /// You can freely resend the payment in full (with the parameter error fixed). + /// + /// Because the payment failed outright, no payment tracking is done, you do not need to call + /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work + /// for this payment. ParameterError(APIError), /// A parameter in a single path which was passed to send_payment was invalid, preventing us - /// from attempting to send the payment at all. No channel state has been changed or messages - /// sent to peers, and once you've changed the parameter at error, you can freely retry the - /// payment in full. + /// from attempting to send the payment at all. + /// + /// You can freely resend the payment in full (with the parameter error fixed). /// /// The results here are ordered the same as the paths in the route object which was passed to /// send_payment. + /// + /// Because the payment failed outright, no payment tracking is done, you do not need to call + /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work + /// for this payment. PathParameterError(Vec>), /// All paths which were attempted failed to send, with no channel state change taking place. - /// You can freely retry the payment in full (though you probably want to do so over different + /// You can freely resend the payment in full (though you probably want to do so over different /// paths than the ones selected). /// - /// [`ChannelManager::abandon_payment`] does *not* need to be called for this payment and - /// [`ChannelManager::retry_payment`] will *not* work for this payment. - AllFailedRetrySafe(Vec), + /// Because the payment failed outright, no payment tracking is done, you do not need to call + /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work + /// for this payment. + AllFailedResendSafe(Vec), + /// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not + /// yet completed (i.e. 
generated an [`Event::PaymentSent`]) or been abandoned (via + /// [`ChannelManager::abandon_payment`]). + /// + /// [`Event::PaymentSent`]: events::Event::PaymentSent + DuplicatePayment, /// Some paths which were attempted failed to send, though possibly not all. At least some /// paths have irrevocably committed to the HTLC and retrying the payment in full would result /// in over-/re-payment. @@ -1500,134 +1570,6 @@ macro_rules! emit_channel_ready_event { } } -macro_rules! handle_chan_restoration_locked { - ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr, - $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr, - $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { { - let mut htlc_forwards = None; - - let chanmon_update: Option = $chanmon_update; // Force type-checking to resolve - let chanmon_update_is_none = chanmon_update.is_none(); - let counterparty_node_id = $channel_entry.get().get_counterparty_node_id(); - let res = loop { - let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve - if !forwards.is_empty() { - htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()), - $channel_entry.get().get_funding_txo().unwrap(), forwards)); - } - - if chanmon_update.is_some() { - // On reconnect, we, by definition, only resend a channel_ready if there have been - // no commitment updates, so the only channel monitor update which could also be - // associated with a channel_ready would be the funding_created/funding_signed - // monitor update. That monitor update failing implies that we won't send - // channel_ready until it's been updated, so we can't have a channel_ready and a - // monitor update here (so we don't bother to handle it correctly below). - assert!($channel_ready.is_none()); - // A channel monitor update makes no sense without either a channel_ready or a - // commitment update to process after it. Since we can't have a channel_ready, we - // only bother to handle the monitor-update + commitment_update case below. - assert!($commitment_update.is_some()); - } - - if let Some(msg) = $channel_ready { - // Similar to the above, this implies that we're letting the channel_ready fly - // before it should be allowed to. - assert!(chanmon_update.is_none()); - send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg); - } - if let Some(msg) = $announcement_sigs { - $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: counterparty_node_id, - msg, - }); - } - - emit_channel_ready_event!($self, $channel_entry.get_mut()); - - let funding_broadcastable: Option = $funding_broadcastable; // Force type-checking to resolve - if let Some(monitor_update) = chanmon_update { - // We only ever broadcast a funding transaction in response to a funding_signed - // message and the resulting monitor update. Thus, on channel_reestablish - // message handling we can't have a funding transaction to broadcast. When - // processing a monitor update finishing resulting in a funding broadcast, we - // cannot have a second monitor update, thus this case would indicate a bug. - assert!(funding_broadcastable.is_none()); - // Given we were just reconnected or finished updating a channel monitor, the - // only case where we can get a new ChannelMonitorUpdate would be if we also - // have some commitment updates to send as well. 
- assert!($commitment_update.is_some()); - match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) { - ChannelMonitorUpdateStatus::Completed => {}, - e => { - // channel_reestablish doesn't guarantee the order it returns is sensical - // for the messages it returns, but if we're setting what messages to - // re-transmit on monitor update success, we need to make sure it is sane. - let mut order = $order; - if $raa.is_none() { - order = RAACommitmentOrder::CommitmentFirst; - } - break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true); - } - } - } - - macro_rules! handle_cs { () => { - if let Some(update) = $commitment_update { - $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: counterparty_node_id, - updates: update, - }); - } - } } - macro_rules! handle_raa { () => { - if let Some(revoke_and_ack) = $raa { - $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { - node_id: counterparty_node_id, - msg: revoke_and_ack, - }); - } - } } - match $order { - RAACommitmentOrder::CommitmentFirst => { - handle_cs!(); - handle_raa!(); - }, - RAACommitmentOrder::RevokeAndACKFirst => { - handle_raa!(); - handle_cs!(); - }, - } - if let Some(tx) = funding_broadcastable { - log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid()); - $self.tx_broadcaster.broadcast_transaction(&tx); - } - break Ok(()); - }; - - if chanmon_update_is_none { - // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop - // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which - // should *never* end up calling back to `chain_monitor.update_channel()`. - assert!(res.is_ok()); - } - - (htlc_forwards, res, counterparty_node_id) - } } -} - -macro_rules! post_handle_chan_restoration { - ($self: ident, $locked_res: expr) => { { - let (htlc_forwards, res, counterparty_node_id) = $locked_res; - - let _ = handle_error!($self, res, counterparty_node_id); - - if let Some(forwards) = htlc_forwards { - $self.forward_htlcs(&mut [forwards][..]); - } - } } -} - impl ChannelManager where M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, @@ -1661,13 +1603,14 @@ impl ChannelManager ChannelManager ChannelManager ChannelManager { - let peer_state = peer_state.lock().unwrap(); - let their_features = &peer_state.latest_features; - chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? - }, - None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), + let (shutdown_msg, monitor_update, htlcs) = { + let per_peer_state = self.per_peer_state.read().unwrap(); + match per_peer_state.get(&counterparty_node_id) { + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? 
+ }, + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), + } }; failed_htlcs = htlcs; @@ -1948,8 +1895,9 @@ impl ChannelManager ChannelManager ChannelManager ChannelManager amt_msat { return Err(ReceiveError { err_code: 19, - err_data: byte_utils::be64_to_array(amt_msat).to_vec(), + err_data: amt_msat.to_be_bytes().to_vec(), msg: "Upstream node sent less than we were supposed to receive in payment", }); } @@ -2233,7 +2185,8 @@ impl ChannelManager ChannelManager { // unknown_next_peer // Note that this is likely a timing oracle for detecting whether an scid is a - // phantom. - if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) { + // phantom or an intercept. + if (self.default_configuration.accept_intercept_htlcs && + fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) || + fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) + { None } else { break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); @@ -2352,10 +2308,13 @@ impl ChannelManager ChannelManager ChannelManager ChannelManager ChannelManager ChannelManager ChannelManager Err(PaymentSendFailure::ParameterError(APIError::RouteError { - err: "Payment already in progress" - })), + hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment), hash_map::Entry::Vacant(entry) => { let payment = entry.insert(PendingOutboundPayment::Retryable { session_privs: HashSet::new(), @@ -2657,7 +2620,7 @@ impl ChannelManager, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { if route.paths.len() < 1 { - return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); + return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over"})); } if payment_secret.is_none() && route.paths.len() > 1 { return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()})); @@ -2667,12 +2630,12 @@ impl ChannelManager 20 { - path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"})); + path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size"})); continue 'path_check; } for (idx, hop) in path.iter().enumerate() { if idx != path.len() - 1 && hop.pubkey == our_node_id { - path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"})); + path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us"})); continue 'path_check; } } @@ -2748,7 +2711,7 @@ impl ChannelManager ChannelManager ChannelManager Result<(), APIError> { + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + + let next_hop_scid = match self.channel_state.lock().unwrap().by_id.get(next_hop_channel_id) { + Some(chan) => { + if !chan.is_usable() { + return Err(APIError::ChannelUnavailable { + err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) + }) + } + chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()) + }, + None => return Err(APIError::ChannelUnavailable { + err: format!("Channel with id 
{} not found", log_bytes!(*next_hop_channel_id)) + }) + }; + + let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id) + .ok_or_else(|| APIError::APIMisuseError { + err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) + })?; + + let routing = match payment.forward_info.routing { + PendingHTLCRouting::Forward { onion_packet, .. } => { + PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid } + }, + _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted + }; + let pending_htlc_info = PendingHTLCInfo { + outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info + }; + + let mut per_source_pending_forward = [( + payment.prev_short_channel_id, + payment.prev_funding_outpoint, + payment.prev_user_channel_id, + vec![(pending_htlc_info, payment.prev_htlc_id)] + )]; + self.forward_htlcs(&mut per_source_pending_forward); + Ok(()) + } + + /// Fails the intercepted HTLC indicated by intercept_id. Should only be called in response to + /// an [`HTLCIntercepted`] event. See [`ChannelManager::forward_intercepted_htlc`]. + /// + /// Errors if the event was not handled in time, in which case the HTLC was automatically failed + /// backwards. + /// + /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted + pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> { + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + + let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id) + .ok_or_else(|| APIError::APIMisuseError { + err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) + })?; + + if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: payment.prev_short_channel_id, + outpoint: payment.prev_funding_outpoint, + htlc_id: payment.prev_htlc_id, + incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, + phantom_shared_secret: None, + }); + + let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10); + let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id }; + self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination); + } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted + + Ok(()) + } + /// Processes HTLCs which are pending waiting on random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. @@ -3132,22 +3197,19 @@ impl ChannelManager)> = Vec::new(); - let mut handle_errors = Vec::new(); + let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { let mut forward_htlcs = HashMap::new(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); for (short_chan_id, mut pending_forwards) in forward_htlcs { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; if short_chan_id != 0 { macro_rules! forwarding_channel_not_found { () => { for forward_info in pending_forwards.drain(..) 
{ match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, incoming_amt_msat: _ @@ -3172,7 +3234,7 @@ impl ChannelManager ChannelManager { match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) { - Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])), + Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, @@ -3243,18 +3305,18 @@ impl ChannelManager { forwarding_channel_not_found!(); continue; }, hash_map::Entry::Occupied(mut chan) => { - let mut add_htlc_msgs = Vec::new(); - let mut fail_htlc_msgs = Vec::new(); for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint , + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _, @@ -3269,34 +3331,21 @@ impl ChannelManager { - if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); - } else { - panic!("Stated return value requirements in send_htlc() were not met"); - } - let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); - failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code, data }, - HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } - )); - continue; - }, - Ok(update_add) => { - match update_add { - Some(msg) => { add_htlc_msgs.push(msg); }, - None => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can add anymore HTLCs. The Channel - // will automatically handle building the update_add_htlc and - // commitment_signed messages when we can. - // TODO: Do some kind of timer to set the channel as !is_live() - // as we don't really want others relying on us relaying through - // this channel currently :/. - } - } + if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat, + payment_hash, outgoing_cltv_value, htlc_source.clone(), + onion_packet, &self.logger) + { + if let ChannelError::Ignore(msg) = e { + log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); + } else { + panic!("Stated return value requirements in send_htlc() were not met"); } + let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::reason(failure_code, data), + HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } + )); + continue; } }, HTLCForwardInfo::AddHTLC { .. 
} => { @@ -3304,84 +3353,29 @@ impl ChannelManager { log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { - Err(e) => { - if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); - } else { - panic!("Stated return value requirements in get_update_fail_htlc() were not met"); - } - // fail-backs are best-effort, we probably already have one - // pending, and if not that's OK, if not, the channel is on - // the chain and sending the HTLC-Timeout is their problem. - continue; - }, - Ok(Some(msg)) => { fail_htlc_msgs.push(msg); }, - Ok(None) => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can update the commitment - // transaction. The Channel will automatically handle - // building the update_fail_htlc and commitment_signed - // messages when we can. - // We don't need any kind of timer here as they should fail - // the channel onto the chain if they can't get our - // update_fail_htlc in time, it's not our problem. + if let Err(e) = chan.get_mut().queue_fail_htlc( + htlc_id, err_packet, &self.logger + ) { + if let ChannelError::Ignore(msg) = e { + log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); + } else { + panic!("Stated return value requirements in queue_fail_htlc() were not met"); } + // fail-backs are best-effort, we probably already have one + // pending, and if not that's OK, if not, the channel is on + // the chain and sending the HTLC-Timeout is their problem. + continue; } }, } } - - if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() { - let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { - Ok(res) => res, - Err(e) => { - // We surely failed send_commitment due to bad keys, in that case - // close channel and then send error message to peer. - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let err: Result<(), _> = match e { - ChannelError::Ignore(_) | ChannelError::Warn(_) => { - panic!("Stated return value requirements in send_commitment() were not met"); - } - ChannelError::Close(msg) => { - log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); - let mut channel = remove_channel!(self, chan); - // ChannelClosed event is generated by handle_error for us. 
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) - }, - }; - handle_errors.push((counterparty_node_id, err)); - continue; - } - }; - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - ChannelMonitorUpdateStatus::Completed => {}, - e => { - handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true))); - continue; - } - } - log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", - add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: add_htlc_msgs, - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: fail_htlc_msgs, - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: commitment_msg, - }, - }); - } } } } else { for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, .. } @@ -3414,9 +3408,9 @@ impl ChannelManager { - let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec(); + let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice( - &byte_utils::be32_to_array(self.best_block.read().unwrap().height()), + &self.best_block.read().unwrap().height().to_be_bytes(), ); failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: $htlc.prev_hop.short_channel_id, @@ -3425,22 +3419,33 @@ impl ChannelManager {{ - let mut payment_received_generated = false; + let mut payment_claimable_generated = false; let purpose = || { events::PaymentPurpose::InvoicePayment { payment_preimage: $payment_preimage, payment_secret: $payment_data.payment_secret, } }; - let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash) + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { + fail_htlc!(claimable_htlc, payment_hash); + continue + } + let (_, htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash) .or_insert_with(|| (purpose(), Vec::new())); if htlcs.len() == 1 { if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { @@ -3469,20 +3474,24 @@ impl ChannelManager ChannelManager { - match channel_state.claimable_htlcs.entry(payment_hash) { + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { + fail_htlc!(claimable_htlc, payment_hash); + continue + } + match claimable_payments.claimable_htlcs.entry(payment_hash) { hash_map::Entry::Vacant(e) => { let purpose = events::PaymentPurpose::SpontaneousPayment(preimage); e.insert((purpose.clone(), vec![claimable_htlc])); - new_events.push(events::Event::PaymentReceived { + let prev_channel_id = prev_funding_outpoint.to_channel_id(); + 
new_events.push(events::Event::PaymentClaimable { + receiver_node_id: Some(receiver_node_id), payment_hash, amount_msat: outgoing_amt_msat, purpose, + via_channel_id: Some(prev_channel_id), + via_user_channel_id: Some(prev_user_channel_id), }); }, hash_map::Entry::Occupied(_) => { @@ -3541,8 +3559,8 @@ impl ChannelManager ChannelManager ChannelManager, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { - if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } + fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { + if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); - return (true, NotifyOption::SkipPersist, Ok(())); + return NotifyOption::SkipPersist; } if !chan.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); - return (true, NotifyOption::SkipPersist, Ok(())); + return NotifyOption::SkipPersist; } log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); - let mut retain_channel = true; - let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { - Ok(res) => Ok(res), - Err(e) => { - let (drop, res) = convert_chan_err!(self, e, chan, chan_id); - if drop { retain_channel = false; } - Err(res) - } - }; - let ret_err = match res { - Ok(Some((update_fee, commitment_signed, monitor_update))) => { - match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - ChannelMonitorUpdateStatus::Completed => { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, - }); - Ok(()) - }, - e => { - let (res, drop) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); - if drop { retain_channel = false; } - res - } - } - }, - Ok(None) => Ok(()), - Err(e) => Err(e), - }; - (retain_channel, NotifyOption::DoPersist, ret_err) + chan.queue_update_fee(new_feerate, &self.logger); + NotifyOption::DoPersist } #[cfg(fuzzing)] @@ -3669,19 +3654,10 @@ impl ChannelManager ChannelManager, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; channel_state.by_id.retain(|chan_id, chan| { - let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate); + let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = 
NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push((err, counterparty_node_id)); - } - if !retain_channel { return false; } if let Err(e) = chan.timer_check_closing_negotiation_progress() { let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id); @@ -3797,33 +3768,35 @@ impl ChannelManager= MPP_TIMEOUT_TICKS + }) { + timed_out_mpp_htlcs.extend(htlcs.drain(..).map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash))); return false; } - if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload { - // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat). - // In this case we're not going to handle any timeouts of the parts here. - if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) { - return true; - } else if htlcs.into_iter().any(|htlc| { - htlc.timer_ticks += 1; - return htlc.timer_ticks >= MPP_TIMEOUT_TICKS - }) { - timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone()))); - return false; - } - } - true - }); - } + } + true + }); for htlc_source in timed_out_mpp_htlcs.drain(..) { + let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); + let reason = HTLCFailReason::from_failure_code(23); let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; - self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver ); + self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } for (err, counterparty_node_id) in handle_errors.drain(..) { @@ -3832,17 +3805,24 @@ impl ChannelManager ChannelManager ChannelManager, channel_id: [u8; 32], counterparty_node_id: &PublicKey ) { - for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { - let (failure_code, onion_failure_data) = - match self.channel_state.lock().unwrap().by_id.entry(channel_id) { - hash_map::Entry::Occupied(chan_entry) => { - self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) - }, - hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) - }; + let (failure_code, onion_failure_data) = + match self.channel_state.lock().unwrap().by_id.entry(channel_id) { + hash_map::Entry::Occupied(chan_entry) => { + self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) + }, + hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) + }; + for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { + let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone()); let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; - self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver); + self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } } /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. - fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason,destination: HTLCDestination) { + fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { #[cfg(debug_assertions)] { // Ensure that the `channel_state` lock is not held when calling this function. 
@@ -3958,13 +3935,13 @@ impl ChannelManager { + HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); let mut all_paths_failed = false; let mut full_failure_ev = None; - if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) { if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; @@ -3977,7 +3954,7 @@ impl ChannelManager ChannelManager { + let path_failure = { #[cfg(test)] - let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source); #[cfg(not(test))] - let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, _, _) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source); - if self.payment_is_probe(payment_hash, &payment_id) { - if !payment_retryable { - events::Event::ProbeSuccessful { - payment_id, - payment_hash: payment_hash.clone(), - path: path.clone(), - } - } else { - events::Event::ProbeFailed { - payment_id, - payment_hash: payment_hash.clone(), - path: path.clone(), - short_channel_id, - } - } - } else { - // TODO: If we decided to blame ourselves (or one of our channels) in - // process_onion_failure we should close that channel as it implies our - // next-hop is needlessly blaming us! - if let Some(scid) = short_channel_id { - retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); - } - events::Event::PaymentPathFailed { - payment_id: Some(payment_id), - payment_hash: payment_hash.clone(), - payment_failed_permanently: !payment_retryable, - network_update, - all_paths_failed, - path: path.clone(), - short_channel_id, - retry, - #[cfg(test)] - error_code: onion_error_code, - #[cfg(test)] - error_data: onion_error_data - } - } - }, - &HTLCFailReason::Reason { -#[cfg(test)] - ref failure_code, -#[cfg(test)] - ref data, - .. } => { - // we get a fail_malformed_htlc from the first hop - // TODO: We'd like to generate a NetworkUpdate for temporary - // failures here, but that would be insufficient as find_route - // generally ignores its view of our own channels as we provide them via - // ChannelDetails. - // TODO: For non-temporary failures, we really should be closing the - // channel here as we apparently can't relay through them anyway. 
- let scid = path.first().unwrap().short_channel_id; - retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); - - if self.payment_is_probe(payment_hash, &payment_id) { - events::Event::ProbeFailed { - payment_id, + if self.payment_is_probe(payment_hash, &payment_id) { + if !payment_retryable { + events::Event::ProbeSuccessful { + payment_id: *payment_id, payment_hash: payment_hash.clone(), path: path.clone(), - short_channel_id: Some(scid), } } else { - events::Event::PaymentPathFailed { - payment_id: Some(payment_id), + events::Event::ProbeFailed { + payment_id: *payment_id, payment_hash: payment_hash.clone(), - payment_failed_permanently: false, - network_update: None, - all_paths_failed, path: path.clone(), - short_channel_id: Some(scid), - retry, -#[cfg(test)] - error_code: Some(*failure_code), -#[cfg(test)] - error_data: Some(data.clone()), + short_channel_id, } } + } else { + // TODO: If we decided to blame ourselves (or one of our channels) in + // process_onion_failure we should close that channel as it implies our + // next-hop is needlessly blaming us! + if let Some(scid) = short_channel_id { + retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); + } + events::Event::PaymentPathFailed { + payment_id: Some(*payment_id), + payment_hash: payment_hash.clone(), + payment_failed_permanently: !payment_retryable, + network_update, + all_paths_failed, + path: path.clone(), + short_channel_id, + retry, + #[cfg(test)] + error_code: onion_error_code, + #[cfg(test)] + error_data: onion_error_data + } } }; let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(path_failure); if let Some(ev) = full_failure_ev { pending_events.push(ev); } }, - HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => { - let err_packet = match onion_error { - HTLCFailReason::Reason { failure_code, data } => { - log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code); - if let Some(phantom_ss) = phantom_shared_secret { - let phantom_packet = onion_utils::build_failure_packet(&phantom_ss, failure_code, &data[..]).encode(); - let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(&phantom_ss, &phantom_packet); - onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &encrypted_phantom_packet.data[..]) - } else { - let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode(); - onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet) - } - }, - HTLCFailReason::LightningError { err } => { - log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0)); - onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data) - } - }; + HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => { + log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error); + let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret); let mut forward_event = None; let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); if forward_htlcs.is_empty() { forward_event = 
Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); } - match forward_htlcs.entry(short_channel_id) { + match forward_htlcs.entry(*short_channel_id) { hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }); + entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }); }, hash_map::Entry::Vacant(entry) => { - entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet })); + entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet })); } } mem::drop(forward_htlcs); @@ -4128,13 +4048,13 @@ impl ChannelManager ChannelManager chan_id.clone(), - None => { - valid_mpp = false; + let mut sources = { + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if let Some((payment_purpose, sources)) = claimable_payments.claimable_htlcs.remove(&payment_hash) { + let mut receiver_node_id = self.our_network_pubkey; + for htlc in sources.iter() { + if htlc.prev_hop.phantom_shared_secret.is_some() { + let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode) + .expect("Failed to get node_id for phantom node recipient"); + receiver_node_id = phantom_pubkey; break; } - }; + } - if let None = channel_state.by_id.get(&chan_id) { - valid_mpp = false; - break; + let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash, + ClaimingPayment { amount_msat: sources.iter().map(|source| source.value).sum(), + payment_purpose, receiver_node_id, + }); + if dup_purpose.is_some() { + debug_assert!(false, "Shouldn't get a duplicate pending claim event ever"); + log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", + log_bytes!(payment_hash.0)); } + sources + } else { return; } + }; + debug_assert!(!sources.is_empty()); - if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) { - log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!"); - debug_assert!(false); + // If we are claiming an MPP payment, we check that all channels which contain a claimable + // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while + // we're claiming (or even after we claim, before the commitment update dance completes), + // it should be a relatively rare race, and we'd rather not claim HTLCs that require us to + // go on-chain (and lose the on-chain fee to do so) than just reject the payment. + // + // Note that we'll still always get our funds - as long as the generated + // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain. + // + // If we find an HTLC which we would need to claim but for which we do not have a + // channel, we will fail all parts of the MPP payment. While we could wait and see if + // the sender retries the already-failed path(s), it should be a pretty rare case where + // we got all the HTLCs and then a channel closed while we were waiting for the user to + // provide the preimage, so worrying too much about the optimal handling isn't worth + // it. 
+ let mut claimable_amt_msat = 0; + let mut expected_amt_msat = None; + let mut valid_mpp = true; + let mut errs = Vec::new(); + let mut channel_state = Some(self.channel_state.lock().unwrap()); + for htlc in sources.iter() { + let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) { + Some((_cp_id, chan_id)) => chan_id.clone(), + None => { valid_mpp = false; break; } - expected_amt_msat = Some(htlc.total_msat); - if let OnionPayload::Spontaneous(_) = &htlc.onion_payload { - // We don't currently support MPP for spontaneous payments, so just check - // that there's one payment here and move on. - if sources.len() != 1 { - log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!"); - debug_assert!(false); - valid_mpp = false; - break; - } - } + }; - claimable_amt_msat += htlc.value; - } - if sources.is_empty() || expected_amt_msat.is_none() { - log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); - return; + if let None = channel_state.as_ref().unwrap().by_id.get(&chan_id) { + valid_mpp = false; + break; } - if claimable_amt_msat != expected_amt_msat.unwrap() { - log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", - expected_amt_msat.unwrap(), claimable_amt_msat); - return; + + if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) { + log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!"); + debug_assert!(false); + valid_mpp = false; + break; } - if valid_mpp { - for htlc in sources.drain(..) { - match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) { - ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { - if let msgs::ErrorAction::IgnoreError = err.err.action { - // We got a temporary failure updating monitor, but will claim the - // HTLC when the monitor updating is restored (or on chain). - log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); - claimed_any_htlcs = true; - } else { errs.push((pk, err)); } - }, - ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"), - ClaimFundsFromHop::DuplicateClaim => { - // While we should never get here in most cases, if we do, it likely - // indicates that the HTLC was timed out some time ago and is no longer - // available to be claimed. Thus, it does not make sense to set - // `claimed_any_htlcs`. - }, - ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true, - } + expected_amt_msat = Some(htlc.total_msat); + if let OnionPayload::Spontaneous(_) = &htlc.onion_payload { + // We don't currently support MPP for spontaneous payments, so just check + // that there's one payment here and move on. + if sources.len() != 1 { + log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!"); + debug_assert!(false); + valid_mpp = false; + break; } } - mem::drop(channel_state_lock); - if !valid_mpp { - for htlc in sources.drain(..) 
{ - let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); - htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( - self.best_block.read().unwrap().height())); - self.fail_htlc_backwards_internal( - HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }, - HTLCDestination::FailedPayment { payment_hash } ); + + claimable_amt_msat += htlc.value; + } + if sources.is_empty() || expected_amt_msat.is_none() { + mem::drop(channel_state); + self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); + return; + } + if claimable_amt_msat != expected_amt_msat.unwrap() { + mem::drop(channel_state); + self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", + expected_amt_msat.unwrap(), claimable_amt_msat); + return; + } + if valid_mpp { + for htlc in sources.drain(..) { + if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } + if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), htlc.prev_hop, + payment_preimage, + |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })) + { + if let msgs::ErrorAction::IgnoreError = err.err.action { + // We got a temporary failure updating monitor, but will claim the + // HTLC when the monitor updating is restored (or on chain). + log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); + } else { errs.push((pk, err)); } } } - - if claimed_any_htlcs { - self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed { - payment_hash, - purpose: payment_purpose, - amount_msat: claimable_amt_msat, - }); + } + mem::drop(channel_state); + if !valid_mpp { + for htlc in sources.drain(..) { + let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); + htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + let source = HTLCSource::PreviousHopData(htlc.prev_hop); + let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); + let receiver = HTLCDestination::FailedPayment { payment_hash }; + self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } + self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + } - // Now we can handle any errors which were generated. - for (counterparty_node_id, err) in errs.drain(..) { - let res: Result<(), _> = Err(err); - let _ = handle_error!(self, res, counterparty_node_id); - } + // Now we can handle any errors which were generated. + for (counterparty_node_id, err) in errs.drain(..) { + let res: Result<(), _> = Err(err); + let _ = handle_error!(self, res, counterparty_node_id); } } - fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { + fn claim_funds_from_hop) -> Option>(&self, + mut channel_state_lock: MutexGuard::Signer>>, + prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) + -> Result<(), (PublicKey, MsgHandleErrInternal)> { //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
- let channel_state = &mut **channel_state_lock; - let chan_id = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { - Some((_cp_id, chan_id)) => chan_id.clone(), - None => { - return ClaimFundsFromHop::PrevHopForceClosed - } - }; + let chan_id = prev_hop.outpoint.to_channel_id(); + let channel_state = &mut *channel_state_lock; if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { + let counterparty_node_id = chan.get().get_counterparty_node_id(); match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok(msgs_monitor_option) => { if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { @@ -4293,11 +4224,10 @@ impl ChannelManager ChannelManager { match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { + // TODO: This needs to be handled somehow - if we receive a monitor update + // with a preimage we *must* somehow manage to propagate it to the upstream + // channel, or we must have an ability to receive the same update and try + // again on restart. log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info }, "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", payment_preimage, e); }, } - let counterparty_node_id = chan.get().get_counterparty_node_id(); let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id); if drop { chan.remove_entry(); } - return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); + mem::drop(channel_state_lock); + self.handle_monitor_update_completion_actions(completion_action(None)); + Err((counterparty_node_id, res)) }, } - } else { return ClaimFundsFromHop::PrevHopForceClosed } + } else { + let preimage_update = ChannelMonitorUpdate { + update_id: CLOSED_CHANNEL_UPDATE_ID, + updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage, + }], + }; + // We update the ChannelMonitor on the backward link, after + // receiving an `update_fulfill_htlc` from the forward link. + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update); + if update_res != ChannelMonitorUpdateStatus::Completed { + // TODO: This needs to be handled somehow - if we receive a monitor update + // with a preimage we *must* somehow manage to propagate it to the upstream + // channel, or we must have an ability to receive the same event and try + // again on restart. + log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, update_res); + } + mem::drop(channel_state_lock); + // Note that we do process the completion action here. This totally could be a + // duplicate claim, but we have no way of knowing without interrogating the + // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are + // generally always allowed to be duplicative (and it's specifically noted in + // `PaymentForwarded`). 
+ self.handle_monitor_update_completion_actions(completion_action(None)); + Ok(()) + } } fn finalize_claims(&self, mut sources: Vec) { @@ -4363,7 +4326,7 @@ impl ChannelManager::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, channel_state_lock: MutexGuard::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { mem::drop(channel_state_lock); @@ -4410,62 +4373,28 @@ impl ChannelManager { let prev_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage); - let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true }; - let htlc_claim_value_msat = match res { - ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt, - ClaimFundsFromHop::Success(amt) => Some(amt), - _ => None, - }; - if let ClaimFundsFromHop::PrevHopForceClosed = res { - let preimage_update = ChannelMonitorUpdate { - update_id: CLOSED_CHANNEL_UPDATE_ID, - updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: payment_preimage.clone(), - }], - }; - // We update the ChannelMonitor on the backward link, after - // receiving an offchain preimage event from the forward link (the - // event being update_fulfill_htlc). - let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update); - if update_res != ChannelMonitorUpdateStatus::Completed { - // TODO: This needs to be handled somehow - if we receive a monitor update - // with a preimage we *must* somehow manage to propagate it to the upstream - // channel, or we must have an ability to receive the same event and try - // again on restart. - log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", - payment_preimage, update_res); - } - // Note that we do *not* set `claimed_htlc` to false here. In fact, this - // totally could be a duplicate claim, but we have no way of knowing - // without interrogating the `ChannelMonitor` we've provided the above - // update to. Instead, we simply document in `PaymentForwarded` that this - // can happen. 
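// Minimal sketch (names are hypothetical, not LDK's API) of the completion-action pattern used
// by `claim_funds_from_hop` above: the caller passes a closure mapping the claimed HTLC value,
// if known, to an optional follow-up action, and the action is only processed after the
// channel-state lock has been dropped, keeping event generation outside the locked region.
enum CompletionAction {
	EmitPaymentClaimed,
	EmitPaymentForwarded { fee_earned_msat: Option<u64> },
}

fn claim_then_complete<F>(claimed_value_msat: Option<u64>, completion_action: F) -> Option<CompletionAction>
where F: FnOnce(Option<u64>) -> Option<CompletionAction> {
	// ... claim the HTLC while holding the channel-state lock, then drop the lock ...
	// Only now is the completion action computed and handed back to the caller.
	completion_action(claimed_value_msat)
}

// For example, a forwarding node might derive its routing fee from the claimed value:
// claim_then_complete(Some(10_500), |claimed| claimed.map(|v|
// 	CompletionAction::EmitPaymentForwarded { fee_earned_msat: Some(v - 10_000) }));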
- } - mem::drop(channel_state_lock); - if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res { + let res = self.claim_funds_from_hop(channel_state_lock, hop_data, payment_preimage, + |htlc_claim_value_msat| { + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { None }; + + let prev_channel_id = Some(prev_outpoint.to_channel_id()); + let next_channel_id = Some(next_channel_id); + + Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded { + fee_earned_msat, + claim_from_onchain_tx: from_onchain, + prev_channel_id, + next_channel_id, + }}) + } else { None } + }); + if let Err((pk, err)) = res { let result: Result<(), _> = Err(err); let _ = handle_error!(self, result, pk); } - - if claimed_htlc { - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { None }; - - let mut pending_events = self.pending_events.lock().unwrap(); - let prev_channel_id = Some(prev_outpoint.to_channel_id()); - let next_channel_id = Some(next_channel_id); - - pending_events.push(events::Event::PaymentForwarded { - fee_earned_msat, - claim_from_onchain_tx: from_onchain, - prev_channel_id, - next_channel_id, - }); - } - } }, } } @@ -4475,10 +4404,91 @@ impl ChannelManager>(&self, actions: I) { + for action in actions.into_iter() { + match action { + MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => { + let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment { + self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed { + payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id), + }); + } + }, + MonitorUpdateCompletionAction::EmitEvent { event } => { + self.pending_events.lock().unwrap().push(event); + }, + } + } + } + + /// Handles a channel reentering a functional state, either due to reconnect or a monitor + /// update completion. + fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, + channel: &mut Channel<::Signer>, raa: Option, + commitment_update: Option, order: RAACommitmentOrder, + pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option, + channel_ready: Option, announcement_sigs: Option) + -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> { + let mut htlc_forwards = None; + + let counterparty_node_id = channel.get_counterparty_node_id(); + if !pending_forwards.is_empty() { + htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()), + channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards)); + } + + if let Some(msg) = channel_ready { + send_channel_ready!(self, pending_msg_events, channel, msg); + } + if let Some(msg) = announcement_sigs { + pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: counterparty_node_id, + msg, + }); + } + + emit_channel_ready_event!(self, channel); + + macro_rules! handle_cs { () => { + if let Some(update) = commitment_update { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id, + updates: update, + }); + } + } } + macro_rules! 
handle_raa { () => { + if let Some(revoke_and_ack) = raa { + pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { + node_id: counterparty_node_id, + msg: revoke_and_ack, + }); + } + } } + match order { + RAACommitmentOrder::CommitmentFirst => { + handle_cs!(); + handle_raa!(); + }, + RAACommitmentOrder::RevokeAndACKFirst => { + handle_raa!(); + handle_cs!(); + }, + } + + if let Some(tx) = funding_broadcastable { + log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid()); + self.tx_broadcaster.broadcast_transaction(&tx); + } + + htlc_forwards + } + fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let chan_restoration_res; + let htlc_forwards; let (mut pending_failures, finalized_claims, counterparty_node_id) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; @@ -4505,18 +4515,20 @@ impl ChannelManager ChannelManager ChannelManager return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) } @@ -4767,7 +4779,7 @@ impl ChannelManager update, Err(e) => try_chan_entry!(self, Err(e), chan), }; @@ -4884,7 +4896,8 @@ impl ChannelManager ChannelManager { let reason = if (error_code & 0x1000) != 0 { let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data) + HTLCFailReason::reason(real_code, error_data) } else { - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) - }; + HTLCFailReason::from_failure_code(error_code) + }.get_encrypted_failure_packet(incoming_shared_secret, &None); let msg = msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, @@ -5010,7 +5023,7 @@ impl ChannelManager return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -5029,7 +5042,7 @@ impl ChannelManager return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -5084,31 +5097,85 @@ impl ChannelManager)]) { - for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards { + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) { + for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { let mut forward_event = None; + let mut new_intercept_events = Vec::new(); + let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { - let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); - if forward_htlcs.is_empty() { - forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)) - } for (forward_info, prev_htlc_id) in pending_forwards.drain(..) { - match forward_htlcs.entry(match forward_info.routing { - PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, - PendingHTLCRouting::Receive { .. } => 0, - PendingHTLCRouting::ReceiveKeysend { .. } => 0, - }) { + let scid = match forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, + PendingHTLCRouting::Receive { .. 
} => 0, + PendingHTLCRouting::ReceiveKeysend { .. } => 0, + }; + // Pull this now to avoid introducing a lock order with `forward_htlcs`. + let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid); + + let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); + let forward_htlcs_empty = forward_htlcs.is_empty(); + match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, forward_info })); + prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })); }, hash_map::Entry::Vacant(entry) => { - entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, forward_info }))); + if !is_our_scid && forward_info.incoming_amt_msat.is_some() && + fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash) + { + let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner()); + let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + new_intercept_events.push(events::Event::HTLCIntercepted { + requested_next_hop_scid: scid, + payment_hash: forward_info.payment_hash, + inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(), + expected_outbound_amount_msat: forward_info.outgoing_amt_msat, + intercept_id + }); + entry.insert(PendingAddHTLCInfo { + prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }); + }, + hash_map::Entry::Occupied(_) => { + log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: forward_info.incoming_shared_secret, + phantom_shared_secret: None, + }); + + failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, + HTLCFailReason::from_failure_code(0x4000 | 10), + HTLCDestination::InvalidForward { requested_forward_scid: scid }, + )); + } + } + } else { + // We don't want to generate a PendingHTLCsForwardable event if only intercepted + // payments are being processed. + if forward_htlcs_empty { + forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); + } + entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }))); + } } } } } + + for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) { + self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); + } + + if !new_intercept_events.is_empty() { + let mut events = self.pending_events.lock().unwrap(); + events.append(&mut new_intercept_events); + } + match forward_event { Some(time) => { let mut pending_events = self.pending_events.lock().unwrap(); @@ -5163,7 +5230,8 @@ impl ChannelManager break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -5171,13 +5239,13 @@ impl ChannelManager + short_channel_id, channel_outpoint, user_channel_id)) => { for failure in pending_failures.drain(..) 
{ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() }; - self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver); + self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver); } - self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]); + self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]); self.finalize_claims(finalized_claim_htlcs); Ok(()) }, @@ -5193,7 +5261,7 @@ impl ChannelManager return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -5263,8 +5331,8 @@ impl ChannelManager Result<(), MsgHandleErrInternal> { - let chan_restoration_res; - let (htlcs_failed_forward, need_lnd_workaround) = { + let htlc_forwards; + let need_lnd_workaround = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -5298,19 +5366,21 @@ impl ChannelManager return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } }; - post_handle_chan_restoration!(self, chan_restoration_res); - self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id); + + if let Some(forwards) = htlc_forwards { + self.forward_htlcs(&mut [forwards][..]); + } if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; @@ -5333,7 +5403,8 @@ impl ChannelManager ChannelManager bool { let mut has_monitor_update = false; let mut failed_htlcs = Vec::new(); @@ -5569,8 +5635,8 @@ impl ChannelManager ChannelManager, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> { inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) @@ -5613,7 +5679,7 @@ impl ChannelManager ChannelManager ChannelManager ChannelManager, invoice_expiry_delta_secs: u32) -> Result { inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) } @@ -5707,6 +5773,39 @@ impl ChannelManager u64 { + let best_block_height = self.best_block.read().unwrap().height(); + let short_to_chan_info = self.short_to_chan_info.read().unwrap(); + loop { + let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); + // Ensure the generated scid doesn't conflict with a real channel. + if short_to_chan_info.contains_key(&scid_candidate) { continue } + return scid_candidate + } + } + + /// Gets inflight HTLC information by processing pending outbound payments that are in + /// our channels. May be used during pathfinding to account for in-use channel liquidity. + pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs { + let mut inflight_htlcs = InFlightHtlcs::new(); + + for chan in self.channel_state.lock().unwrap().by_id.values() { + for (htlc_source, _) in chan.inflight_htlc_sources() { + if let HTLCSource::OutboundRoute { path, .. 
} = htlc_source { + inflight_htlcs.process_path(path, self.get_our_node_id()); + } + } + } + + inflight_htlcs + } + #[cfg(any(test, fuzzing, feature = "_test_utils"))] pub fn get_and_clear_pending_events(&self) -> Vec { let events = core::cell::RefCell::new(Vec::new()); @@ -5715,6 +5814,12 @@ impl ChannelManager Option { + let mut events = self.pending_events.lock().unwrap(); + if events.is_empty() { None } else { Some(events.remove(0)) } + } + #[cfg(test)] pub fn has_pending_payments(&self) -> bool { !self.pending_outbound_payments.lock().unwrap().is_empty() @@ -5986,9 +6091,8 @@ where if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { for (source, payment_hash) in timed_out_pending_htlcs.drain(..) { let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); - timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { - failure_code, data, - }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); + timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data), + HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); } if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); @@ -6062,34 +6166,56 @@ where } true }); + } + + if let Some(height) = height_opt { + self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| { + htlcs.retain(|htlc| { + // If height is approaching the number of blocks we think it takes us to get + // our commitment transaction confirmed before the HTLC expires, plus the + // number of blocks we generally consider it to take to do a commitment update, + // just give up on it and fail the HTLC. + if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); + htlc_msat_height_data.extend_from_slice(&height.to_be_bytes()); + + timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), + HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), + HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); + false + } else { true } + }); + !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. + }); - if let Some(height) = height_opt { - channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { - htlcs.retain(|htlc| { - // If height is approaching the number of blocks we think it takes us to get - // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, - // just give up on it and fail the HTLC. 
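// Sketch of the onion failure data assembled above for `incorrect_or_unknown_payment_details`
// (failure code 0x4000 | 15): the failed HTLC's amount as a big-endian u64 followed by the
// current block height as a big-endian u32. The free function is a stand-in for illustration.
fn incorrect_or_unknown_payment_data(htlc_value_msat: u64, best_block_height: u32) -> Vec<u8> {
	let mut data = htlc_value_msat.to_be_bytes().to_vec();
	data.extend_from_slice(&best_block_height.to_be_bytes());
	data
}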
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { - let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); - htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height)); - - timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason { - failure_code: 0x4000 | 15, - data: htlc_msat_height_data - }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); - false - } else { true } + let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + intercepted_htlcs.retain(|_, htlc| { + if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER { + let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: htlc.prev_short_channel_id, + htlc_id: htlc.prev_htlc_id, + incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, + phantom_shared_secret: None, + outpoint: htlc.prev_funding_outpoint, }); - !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. - }); - } + + let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, + _ => unreachable!(), + }; + timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, + HTLCFailReason::from_failure_code(0x2000 | 2), + HTLCDestination::InvalidForward { requested_forward_scid })); + log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); + false + } else { true } + }); } self.handle_init_event_channel_failures(failed_channels); for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { - self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination); + self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination); } } @@ -6137,7 +6263,7 @@ where } } -impl +impl ChannelMessageHandler for ChannelManager where M::Target: chain::Watch<::Signer>, T::Target: BroadcasterInterface, @@ -6469,6 +6595,7 @@ impl Writeable for ChannelDetails { (6, self.funding_txo, option), (7, self.config, option), (8, self.short_channel_id, option), + (9, self.confirmations, option), (10, self.channel_value_satoshis, required), (12, self.unspendable_punishment_reserve, option), (14, user_channel_id_low, required), @@ -6503,6 +6630,7 @@ impl Readable for ChannelDetails { (6, funding_txo, option), (7, config, option), (8, short_channel_id, option), + (9, confirmations, option), (10, channel_value_satoshis, required), (12, unspendable_punishment_reserve, option), (14, user_channel_id_low, required), @@ -6546,6 +6674,7 @@ impl Readable for ChannelDetails { next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(), inbound_capacity_msat: inbound_capacity_msat.0.unwrap(), confirmations_required, + confirmations, force_close_spend_delay, is_outbound: is_outbound.0.unwrap(), is_channel_ready: is_channel_ready.0.unwrap(), @@ -6797,18 +6926,9 @@ impl Writeable for HTLCSource { } } -impl_writeable_tlv_based_enum!(HTLCFailReason, - (0, LightningError) => { - (0, err, required), - }, - (1, Reason) => { - (0, failure_code, required), - (2, data, vec_type), - }, -;); - impl_writeable_tlv_based!(PendingAddHTLCInfo, { (0, forward_info, required), + (1, prev_user_channel_id, (default_value, 0)), (2, prev_short_channel_id, required), (4, prev_htlc_id, required), (6, prev_funding_outpoint, required), @@ -6903,10 +7023,13 @@ impl Writeable for ChannelMana 
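// Illustrative predicate for the intercepted-HTLC timeout above: once the chain tip gets within
// the fail-back buffer of the HTLC's outgoing CLTV, the intercept is abandoned and the HTLC is
// failed back with `expiry_too_far` (0x2000 | 2). The buffer value below is an assumption for
// this sketch, not LDK's actual constant.
const FAIL_BACK_BUFFER_BLOCKS: u32 = 72;

fn intercepted_htlc_timed_out(best_block_height: u32, outgoing_cltv_value: u32) -> bool {
	best_block_height >= outgoing_cltv_value.saturating_sub(FAIL_BACK_BUFFER_BLOCKS)
}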
} } - let channel_state = self.channel_state.lock().unwrap(); + let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); + let claimable_payments = self.claimable_payments.lock().unwrap(); + let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); + let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); - (channel_state.claimable_htlcs.len() as u64).write(writer)?; - for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { + (claimable_payments.claimable_htlcs.len() as u64).write(writer)?; + for (payment_hash, (purpose, previous_hops)) in claimable_payments.claimable_htlcs.iter() { payment_hash.write(writer)?; (previous_hops.len() as u64).write(writer)?; for htlc in previous_hops.iter() { @@ -6923,8 +7046,6 @@ impl Writeable for ChannelMana peer_state.latest_features.write(writer)?; } - let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); - let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); let events = self.pending_events.lock().unwrap(); (events.len() as u64).write(writer)?; for event in events.iter() { @@ -6987,9 +7108,27 @@ impl Writeable for ChannelMana _ => {}, } } + + let mut pending_intercepted_htlcs = None; + let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + if our_pending_intercepts.len() != 0 { + pending_intercepted_htlcs = Some(our_pending_intercepts); + } + + let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments); + if pending_claiming_payments.as_ref().unwrap().is_empty() { + // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments + // map. Thus, if there are no entries we skip writing a TLV for it. + pending_claiming_payments = None; + } else { + debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR"); + } + write_tlv_fields!(writer, { (1, pending_outbound_payments_no_retry, required), + (2, pending_intercepted_htlcs, option), (3, pending_outbound_payments, required), + (4, pending_claiming_payments, option), (5, self.our_network_pubkey, required), (7, self.fake_scid_rand_bytes, required), (9, htlc_purposes, vec_type), @@ -7175,6 +7314,25 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> user_channel_id: channel.get_user_id(), reason: ClosureReason::OutdatedChannelManager }); + for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { + let mut found_htlc = false; + for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { + if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; } + } + if !found_htlc { + // If we have some HTLCs in the channel which are not present in the newer + // ChannelMonitor, they have been removed and should be failed back to + // ensure we don't forget them entirely. Note that if the missing HTLC(s) + // were actually claimed we'd have generated and ensured the previous-hop + // claim update ChannelMonitor updates were persisted prior to persising + // the ChannelMonitor update for the forward leg, so attempting to fail the + // backwards leg of the HTLC will simply be rejected. 
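// Stand-in sketch of the reload check described above: any HTLC the (stale) ChannelManager still
// believes is in flight, but which the newer ChannelMonitor no longer tracks, gets queued to be
// failed backwards. Types here are simplified placeholders, not LDK's.
use std::collections::HashSet;

fn htlcs_missing_from_monitor(channel_htlc_ids: &[u64], monitor_htlc_ids: &HashSet<u64>) -> Vec<u64> {
	channel_htlc_ids.iter().copied().filter(|id| !monitor_htlc_ids.contains(id)).collect()
}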
+					log_info!(args.logger,
+						"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
+						log_bytes!(payment_hash.0), log_bytes!(channel.channel_id()));
+					failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+				}
+			}
 		} else {
 			log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
 			if let Some(short_channel_id) = channel.get_short_channel_id() {
@@ -7255,16 +7413,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 					None => continue,
 				}
 			}
-		if forward_htlcs_count > 0 {
-			// If we have pending HTLCs to forward, assume we either dropped a
-			// `PendingHTLCsForwardable` or the user received it but never processed it as they
-			// shut down before the timer hit. Either way, set the time_forwardable to a small
-			// constant as enough time has likely passed that we should simply handle the forwards
-			// now, or at least after the user gets a chance to reconnect to our peers.
-			pending_events_read.push(events::Event::PendingHTLCsForwardable {
-				time_forwardable: Duration::from_secs(2),
-			});
-		}
 
 		let background_event_count: u64 = Readable::read(reader)?;
 		let mut pending_background_events_read: Vec = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::()));
@@ -7302,13 +7450,17 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 		// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
 		let mut pending_outbound_payments_no_retry: Option>> = None;
 		let mut pending_outbound_payments = None;
+		let mut pending_intercepted_htlcs: Option> = Some(HashMap::new());
 		let mut received_network_pubkey: Option = None;
 		let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
 		let mut probing_cookie_secret: Option<[u8; 32]> = None;
 		let mut claimable_htlc_purposes = None;
+		let mut pending_claiming_payments = Some(HashMap::new());
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
+			(2, pending_intercepted_htlcs, option),
 			(3, pending_outbound_payments, option),
+			(4, pending_claiming_payments, option),
 			(5, received_network_pubkey, option),
 			(7, fake_scid_rand_bytes, option),
 			(9, claimable_htlc_purposes, vec_type),
@@ -7373,10 +7525,58 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				}
 			}
 		}
+		for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
+			if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+				let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+					info.prev_funding_outpoint == prev_hop_data.outpoint &&
+						info.prev_htlc_id == prev_hop_data.htlc_id
+				};
+				// The ChannelMonitor is now responsible for this HTLC's
+				// failure/success and will let us know what its outcome is. If we
+				// still have an entry for this HTLC in `forward_htlcs` or
+				// `pending_intercepted_htlcs`, we were apparently not persisted after
+				// the monitor was when forwarding the payment.
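// Sketch with placeholder types of the reconciliation above: a pending forward whose previous-hop
// (funding outpoint, HTLC id) pair already appears among a ChannelMonitor's outbound HTLCs is
// dropped, since the monitor is now responsible for that HTLC's outcome.
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq)]
struct PrevHopKey { funding_txid: [u8; 32], output_index: u16, htlc_id: u64 }

fn prune_replayed_forwards(forward_htlcs: &mut HashMap<u64, Vec<PrevHopKey>>, monitor_htlc: &PrevHopKey) {
	forward_htlcs.retain(|_scid, forwards| {
		forwards.retain(|fwd| fwd != monitor_htlc);
		!forwards.is_empty()
	});
}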
+ forward_htlcs.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + false + } else { true } + } else { true } + }); + !forwards.is_empty() + }); + pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); + pending_events_read.retain(|event| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); + } + } } } } + if !forward_htlcs.is_empty() { + // If we have pending HTLCs to forward, assume we either dropped a + // `PendingHTLCsForwardable` or the user received it but never processed it as they + // shut down before the timer hit. Either way, set the time_forwardable to a small + // constant as enough time has likely passed that we should simply handle the forwards + // now, or at least after the user gets a chance to reconnect to our peers. + pending_events_read.push(events::Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_secs(2), + }); + } + let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); @@ -7472,6 +7672,13 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) { log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0)); let mut claimable_amt_msat = 0; + let mut receiver_node_id = Some(our_network_pubkey); + let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret; + if phantom_shared_secret.is_some() { + let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode) + .expect("Failed to get node_id for phantom node recipient"); + receiver_node_id = Some(phantom_pubkey) + } for claimable_htlc in claimable_htlcs { claimable_amt_msat += claimable_htlc.value; @@ -7499,6 +7706,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } pending_events_read.push(events::Event::PaymentClaimed { + receiver_node_id, payment_hash, purpose: payment_purpose, amount_msat: claimable_amt_msat, @@ -7517,14 +7725,15 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel_state: Mutex::new(ChannelHolder { by_id, - claimable_htlcs, pending_msg_events: Vec::new(), }), inbound_payment_key: expanded_inbound_key, pending_inbound_payments: Mutex::new(pending_inbound_payments), pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), forward_htlcs: Mutex::new(forward_htlcs), + claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs, pending_claiming_payments: pending_claiming_payments.unwrap() }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), id_to_peer: Mutex::new(id_to_peer), short_to_chan_info: 
FairRwLock::new(short_to_chan_info),
@@ -7553,7 +7762,8 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	for htlc_source in failed_htlcs.drain(..) {
 		let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 		let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
-		channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+		let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
+		channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 	}
 
 	//TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -8300,7 +8510,7 @@ pub mod bench {
 			$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
 			expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
-			expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
+			expect_payment_claimable!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
 			$node_b.claim_funds(payment_preimage);
 			expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);