X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=d4209ce9539695138e80e1eeaec5b93671c57c2a;hb=2390dbcb2236888f1a06f8b5666486eb54ace4b1;hp=c4d90480eecd2baeaeca20fd23299b0de2a00d58;hpb=f79ad2efb161d8ea9ccda9d67cf0ee2d63586cbe;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index c4d90480..d4209ce9 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -49,12 +49,13 @@ use crate::ln::features::InvoiceFeatures;
 use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
+use crate::ln::onion_utils::HTLCFailReason;
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
 use crate::ln::wire::Encode;
 use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient};
 use crate::util::config::{UserConfig, ChannelConfig};
 use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::util::{byte_utils, events};
+use crate::util::events;
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
@@ -276,17 +277,6 @@ impl HTLCSource {
 	}
 }
 
-#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) enum HTLCFailReason {
-	LightningError {
-		err: msgs::OnionErrorPacket,
-	},
-	Reason {
-		failure_code: u16,
-		data: Vec<u8>,
-	}
-}
-
 struct ReceiveError {
 	err_code: u16,
 	err_data: Vec<u8>,
@@ -736,9 +726,9 @@ pub struct ChannelManager
 	channel_state: Mutex<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
 
 	/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
-	/// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
+	/// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements
 	/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
-	/// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
+	/// after we generate a PaymentClaimable upon receipt of all MPP parts or when they time out.
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
@@ -1876,8 +1866,9 @@ impl ChannelManager
 		if hop_data.amt_to_forward > amt_msat {
 			return Err(ReceiveError {
 				err_code: 19,
-				err_data: byte_utils::be64_to_array(amt_msat).to_vec(),
+				err_data: amt_msat.to_be_bytes().to_vec(),
 				msg: "Upstream node sent less than we were supposed to receive in payment",
 			});
 		}
@@ -2161,7 +2156,8 @@ impl ChannelManager
 			None => { // unknown_next_peer
 				// Note that this is likely a timing oracle for detecting whether an scid is a
 				// phantom or an intercept.
-				if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) ||
-					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
+				if (self.default_configuration.accept_intercept_htlcs &&
+					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
+					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
 				{
 					None
 				} else {
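Several hunks in this range swap the crate-local byte_utils helpers for core's u64::to_be_bytes/u32::to_be_bytes. A standalone sketch of why the two spellings are byte-for-byte interchangeable when building the failure data for code 19 (final_incorrect_htlc_amount, which per BOLT 4 carries the incoming amount as a big-endian u64) — be64_to_array below is a local stand-in for the removed helper, not its actual implementation:

    // Stand-in for the removed crate::util::byte_utils::be64_to_array helper.
    fn be64_to_array(v: u64) -> [u8; 8] {
        let mut r = [0u8; 8];
        for i in 0..8 {
            // Big-endian: most significant byte first.
            r[i] = (v >> (56 - 8 * i)) as u8;
        }
        r
    }

    fn main() {
        let amt_msat: u64 = 42_000;
        // Both forms produce the same big-endian failure data for error code 19.
        assert_eq!(be64_to_array(amt_msat).to_vec(), amt_msat.to_be_bytes().to_vec());
    }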
@@ -2282,10 +2279,13 @@ impl ChannelManager
 		payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
 		if route.paths.len() < 1 {
-			return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
+			return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over"}));
 		}
 		if payment_secret.is_none() && route.paths.len() > 1 {
 			return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
@@ -2595,12 +2601,12 @@ impl ChannelManager
 			if path.len() < 1 || path.len() > 20 {
-				path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
+				path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size"}));
 				continue 'path_check;
 			}
 			for (idx, hop) in path.iter().enumerate() {
 				if idx != path.len() - 1 && hop.pubkey == our_node_id {
-					path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"}));
+					path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us"}));
 					continue 'path_check;
 				}
 			}
@@ -3057,14 +3063,19 @@ impl ChannelManager
 		let next_hop_scid = match self.channel_state.lock().unwrap().by_id.get(next_hop_channel_id) {
-			Some(chan) => chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()),
-			None => return Err(APIError::APIMisuseError {
-				err: format!("Channel with id {:?} not found", next_hop_channel_id)
+			Some(chan) => {
+				if !chan.is_usable() {
+					return Err(APIError::ChannelUnavailable {
+						err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+					})
+				}
+				chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+			},
+			None => return Err(APIError::ChannelUnavailable {
+				err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id))
 			})
 		};
 
 		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
 			.ok_or_else(|| APIError::APIMisuseError {
-				err: format!("Payment with intercept id {:?} not found", intercept_id.0)
+				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 			})?;
 
 		let routing = match payment.forward_info.routing {
@@ -3106,13 +3124,16 @@ impl ChannelManager
 	pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
 		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
 			.ok_or_else(|| APIError::APIMisuseError {
-				err: format!("Payment with InterceptId {:?} not found", intercept_id)
+				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 			})?;
 
 		if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
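The error strings above also stop Debug-printing raw byte ids ({:?}) in favor of log_bytes!, which renders them as hex. A minimal illustration of the difference, using a hypothetical hex helper rather than the real macro (which lives in lightning's logging utilities):

    // Minimal hex formatter with the same observable output shape as formatting
    // a byte array via log_bytes! (lowercase hex, no 0x prefix). Illustrative only.
    fn hex(bytes: &[u8]) -> String {
        bytes.iter().map(|b| format!("{:02x}", b)).collect()
    }

    fn main() {
        let channel_id = [0xde, 0xad, 0xbe, 0xef];
        // Debug-formatting a byte array prints "[222, 173, 190, 239]"...
        println!("{:?}", channel_id);
        // ...while the hex rendering matches how operators usually quote ids.
        assert_eq!(hex(&channel_id), "deadbeef");
        println!("Channel with id {} not found", hex(&channel_id));
    }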
@@ -3124,9 +3145,9 @@ impl ChannelManager
 					($htlc: expr, $payment_hash: expr) => {
-						let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
+						let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
 						htlc_msat_height_data.extend_from_slice(
-							&byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
+							&self.best_block.read().unwrap().height().to_be_bytes(),
 						);
 						failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
 							short_channel_id: $htlc.prev_hop.short_channel_id,
@@ -3434,7 +3455,7 @@ impl ChannelManager
 					($payment_data: expr, $payment_preimage: expr) => {{
-						let mut payment_received_generated = false;
+						let mut payment_claimable_generated = false;
 						let purpose = || {
 							events::PaymentPurpose::InvoicePayment {
 								payment_preimage: $payment_preimage,
@@ -3487,7 +3508,7 @@ impl ChannelManager
 	fn fail_holding_cell_htlcs(
 		&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
 		counterparty_node_id: &PublicKey
 	) {
-		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
-			let (failure_code, onion_failure_data) =
-				match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
-					hash_map::Entry::Occupied(chan_entry) => {
-						self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
-					},
-					hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
-				};
+		let (failure_code, onion_failure_data) =
+			match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
+				hash_map::Entry::Occupied(chan_entry) => {
+					self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+				},
+				hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+			};
+
+		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
+			let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
-			self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
+			self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
 		}
 	}
 
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
-	fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
 		#[cfg(debug_assertions)]
 		{
 			// Ensure that the `channel_state` lock is not held when calling this function.
@@ -3979,13 +4002,13 @@ impl ChannelManager
 		match source {
-			HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_params, .. } => {
+			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
 				let mut session_priv_bytes = [0; 32];
 				session_priv_bytes.copy_from_slice(&session_priv[..]);
 				let mut outbounds = self.pending_outbound_payments.lock().unwrap();
 				let mut all_paths_failed = false;
 				let mut full_failure_ev = None;
-				if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+				if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) {
 					if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
 						log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
 						return;
@@ -3998,7 +4021,7 @@ impl ChannelManager
-				let path_failure = match &onion_error {
-					&HTLCFailReason::LightningError { ref err } => {
+				let path_failure = {
 #[cfg(test)]
-						let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+					let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source);
 #[cfg(not(test))]
-						let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+					let (network_update, short_channel_id, payment_retryable, _, _) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source);
 
-						if self.payment_is_probe(payment_hash, &payment_id) {
-							if !payment_retryable {
-								events::Event::ProbeSuccessful {
-									payment_id,
-									payment_hash: payment_hash.clone(),
-									path: path.clone(),
-								}
-							} else {
-								events::Event::ProbeFailed {
-									payment_id,
-									payment_hash: payment_hash.clone(),
-									path: path.clone(),
-									short_channel_id,
-								}
-							}
-						} else {
-							// TODO: If we decided to blame ourselves (or one of our channels) in
-							// process_onion_failure we should close that channel as it implies our
-							// next-hop is needlessly blaming us!
-							if let Some(scid) = short_channel_id {
-								retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
-							}
-							events::Event::PaymentPathFailed {
-								payment_id: Some(payment_id),
+					if self.payment_is_probe(payment_hash, &payment_id) {
+						if !payment_retryable {
+							events::Event::ProbeSuccessful {
+								payment_id: *payment_id,
 								payment_hash: payment_hash.clone(),
-								payment_failed_permanently: !payment_retryable,
-								network_update,
-								all_paths_failed,
 								path: path.clone(),
-								short_channel_id,
-								retry,
-								#[cfg(test)]
-								error_code: onion_error_code,
-								#[cfg(test)]
-								error_data: onion_error_data
-							}
-						}
-					},
-					&HTLCFailReason::Reason {
-#[cfg(test)]
-						ref failure_code,
-#[cfg(test)]
-						ref data,
-						.. } => {
-						// we get a fail_malformed_htlc from the first hop
-						// TODO: We'd like to generate a NetworkUpdate for temporary
-						// failures here, but that would be insufficient as find_route
-						// generally ignores its view of our own channels as we provide them via
-						// ChannelDetails.
-						// TODO: For non-temporary failures, we really should be closing the
-						// channel here as we apparently can't relay through them anyway.
-						let scid = path.first().unwrap().short_channel_id;
-						retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
-
-						if self.payment_is_probe(payment_hash, &payment_id) {
-							events::Event::ProbeFailed {
-								payment_id,
-								payment_hash: payment_hash.clone(),
-								path: path.clone(),
-								short_channel_id: Some(scid),
 							}
 						} else {
-							events::Event::PaymentPathFailed {
-								payment_id: Some(payment_id),
+							events::Event::ProbeFailed {
+								payment_id: *payment_id,
 								payment_hash: payment_hash.clone(),
-								payment_failed_permanently: false,
-								network_update: None,
-								all_paths_failed,
 								path: path.clone(),
-								short_channel_id: Some(scid),
-								retry,
-#[cfg(test)]
-								error_code: Some(*failure_code),
-#[cfg(test)]
-								error_data: Some(data.clone()),
+								short_channel_id,
 							}
 						}
+					} else {
+						// TODO: If we decided to blame ourselves (or one of our channels) in
+						// process_onion_failure we should close that channel as it implies our
+						// next-hop is needlessly blaming us!
+						if let Some(scid) = short_channel_id {
+							retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
+						}
+						events::Event::PaymentPathFailed {
+							payment_id: Some(*payment_id),
+							payment_hash: payment_hash.clone(),
+							payment_failed_permanently: !payment_retryable,
+							network_update,
+							all_paths_failed,
+							path: path.clone(),
+							short_channel_id,
+							retry,
+							#[cfg(test)]
+							error_code: onion_error_code,
+							#[cfg(test)]
+							error_data: onion_error_data
+						}
 					}
 				};
 				let mut pending_events = self.pending_events.lock().unwrap();
 				pending_events.push(path_failure);
 				if let Some(ev) = full_failure_ev { pending_events.push(ev); }
 			},
-			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
-				let err_packet = match onion_error {
-					HTLCFailReason::Reason { failure_code, data } => {
-						log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
-						if let Some(phantom_ss) = phantom_shared_secret {
-							let phantom_packet = onion_utils::build_failure_packet(&phantom_ss, failure_code, &data[..]).encode();
-							let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(&phantom_ss, &phantom_packet);
-							onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
-						} else {
-							let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
-							onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
-						}
-					},
-					HTLCFailReason::LightningError { err } => {
-						log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
-						onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
-					}
-				};
+			HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
+				log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
+				let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
 
 				let mut forward_event = None;
 				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
 				if forward_htlcs.is_empty() {
 					forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
 				}
-				match forward_htlcs.entry(short_channel_id) {
+				match forward_htlcs.entry(*short_channel_id) {
 					hash_map::Entry::Occupied(mut entry) => {
-						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
+						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
 					},
 					hash_map::Entry::Vacant(entry) => {
-						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
+						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }));
 					}
 				}
 				mem::drop(forward_htlcs);
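Call sites now build failures with the HTLCFailReason::reason and HTLCFailReason::from_failure_code constructors and ask the reason for its encrypted packet, instead of matching on the enum's variants inline. A rough, self-contained sketch of that API shape — the field types and method bodies below are stand-ins, not the real onion_utils implementation, which actually builds and onion-encrypts a BOLT 4 failure packet:

    // Sketch of the constructor-based API this diff migrates to; bodies stubbed.
    #[derive(Clone, Debug)]
    enum HTLCFailReasonRepr {
        LightningError { err: Vec<u8> },             // pre-built onion error blob
        Reason { failure_code: u16, data: Vec<u8> }, // locally generated failure
    }

    #[derive(Clone, Debug)]
    struct HTLCFailReason(HTLCFailReasonRepr);

    impl HTLCFailReason {
        fn reason(failure_code: u16, data: Vec<u8>) -> Self {
            Self(HTLCFailReasonRepr::Reason { failure_code, data })
        }
        fn from_failure_code(failure_code: u16) -> Self {
            Self::reason(failure_code, Vec::new())
        }
        // Stand-in for get_encrypted_failure_packet: callers no longer match on
        // the variants, they just ask for the packet to relay backwards.
        fn get_encrypted_failure_packet(&self /*, shared secrets ... */) -> Vec<u8> {
            match &self.0 {
                HTLCFailReasonRepr::LightningError { err } => err.clone(),
                HTLCFailReasonRepr::Reason { failure_code, data } => {
                    let mut pkt = failure_code.to_be_bytes().to_vec();
                    pkt.extend_from_slice(data);
                    pkt // the real code onion-encrypts this per BOLT 4
                }
            }
        }
    }

    fn main() {
        let reason = HTLCFailReason::from_failure_code(0x4000 | 10); // unknown_next_peer
        assert_eq!(&reason.get_encrypted_failure_packet()[..2], &[0x40, 0x0a]);
    }

Hiding the representation behind constructors is what lets the hunks above delete the open-coded match on HTLCFailReason variants, and lets the later hunk drop the impl_writeable_tlv_based_enum! registration from channelmanager.rs (presumably re-registered alongside the type in onion_utils, which this diff does not show).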
@@ -4149,13 +4115,13 @@ impl ChannelManager
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
 			}
@@ -4856,7 +4821,7 @@ impl ChannelManager
 					Ok(update) => update,
 					Err(e) => try_chan_entry!(self, Err(e), chan),
 				};
@@ -4973,7 +4938,8 @@ impl ChannelManager
 					let reason = if (error_code & 0x1000) != 0 {
 						let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-						onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data)
+						HTLCFailReason::reason(real_code, error_data)
 					} else {
-						onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
-					};
+						HTLCFailReason::from_failure_code(error_code)
+					}.get_encrypted_failure_packet(incoming_shared_secret, &None);
 					let msg = msgs::UpdateFailHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
@@ -5099,7 +5065,7 @@ impl ChannelManager
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 			}
@@ -5118,7 +5084,7 @@ impl ChannelManager
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -5224,7 +5190,7 @@ impl ChannelManager
 	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> {
 		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
@@ -5759,7 +5726,7 @@ impl ChannelManager
 	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, ()> {
 		inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
 	}
@@ -5876,7 +5843,7 @@ impl ChannelManager
+	#[cfg(test)]
+	pub fn pop_pending_event(&self) -> Option<events::Event> {
+		let mut events = self.pending_events.lock().unwrap();
+		if events.is_empty() { None } else { Some(events.remove(0)) }
+	}
+
 	#[cfg(test)]
 	pub fn has_pending_payments(&self) -> bool {
 		!self.pending_outbound_payments.lock().unwrap().is_empty()
 	}
@@ -6165,9 +6138,8 @@ where
 			if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
 				for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
 					let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
-					timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
-						failure_code, data,
-					}, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+					timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
+						HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
 				}
 				if let Some(channel_ready) = channel_ready_opt {
 					send_channel_ready!(self, pending_msg_events, channel, channel_ready);
@@ -6251,24 +6223,46 @@ where
 						// number of blocks we generally consider it to take to do a commitment update,
 						// just give up on it and fail the HTLC.
 						if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
-							let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-							htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+							let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
+							htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());
 
-							timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
-								failure_code: 0x4000 | 15,
-								data: htlc_msat_height_data
-							}, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+							timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
+								HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
+								HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
 							false
 						} else { true }
 					});
 					!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
 				});
+
+				let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
+				intercepted_htlcs.retain(|_, htlc| {
+					if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
+						let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+							short_channel_id: htlc.prev_short_channel_id,
+							htlc_id: htlc.prev_htlc_id,
+							incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
+							phantom_shared_secret: None,
+							outpoint: htlc.prev_funding_outpoint,
+						});
+
+						let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
+							PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
+							_ => unreachable!(),
+						};
+						timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
+							HTLCFailReason::from_failure_code(0x2000 | 2),
+							HTLCDestination::InvalidForward { requested_forward_scid }));
+						log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
+						false
+					} else { true }
+				});
 			}
 
 			self.handle_init_event_channel_failures(failed_channels);
 
 			for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
-				self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
+				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
 			}
 		}
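For reference, the failure codes used throughout these hunks combine BOLT 4's flag bits with a code number. A small sketch of that arithmetic (constant names follow BOLT 4; they are not identifiers from this file):

    // BOLT 4 failure-code flag bits.
    const BADONION: u16 = 0x8000; // unparseable onion, reported upstream
    const PERM: u16 = 0x4000;     // permanent failure
    const NODE: u16 = 0x2000;     // node-level rather than channel-level failure
    const UPDATE: u16 = 0x1000;   // failure message carries a channel_update

    fn main() {
        assert_eq!(UPDATE | 7, 0x1007);  // temporary_channel_failure
        assert_eq!(UPDATE | 14, 0x100e); // expiry_too_soon
        assert_eq!(NODE | 2, 0x2002);    // temporary_node_failure (intercept timeout above)
        assert_eq!(PERM | 10, 0x400a);   // unknown_next_peer
        assert_eq!(PERM | 15, 0x400f);   // incorrect_or_unknown_payment_details
        let _ = BADONION;
    }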
@@ -6979,16 +6973,6 @@ impl Writeable for HTLCSource {
 	}
 }
 
-impl_writeable_tlv_based_enum!(HTLCFailReason,
-	(0, LightningError) => {
-		(0, err, required),
-	},
-	(1, Reason) => {
-		(0, failure_code, required),
-		(2, data, vec_type),
-	},
-;);
-
 impl_writeable_tlv_based!(PendingAddHTLCInfo, {
 	(0, forward_info, required),
 	(1, prev_user_channel_id, (default_value, 0)),
@@ -7366,6 +7350,25 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 						user_channel_id: channel.get_user_id(),
 						reason: ClosureReason::OutdatedChannelManager
 					});
+					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
+						let mut found_htlc = false;
+						for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
+							if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
+						}
+						if !found_htlc {
+							// If we have some HTLCs in the channel which are not present in the newer
+							// ChannelMonitor, they have been removed and should be failed back to
+							// ensure we don't forget them entirely. Note that if the missing HTLC(s)
+							// were actually claimed we'd have generated and ensured the previous-hop
+							// claim update ChannelMonitor updates were persisted prior to persisting
+							// the ChannelMonitor update for the forward leg, so attempting to fail the
+							// backwards leg of the HTLC will simply be rejected.
+							log_info!(args.logger,
+								"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
+								log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
+							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+						}
+					}
 				} else {
 					log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
 					if let Some(short_channel_id) = channel.get_short_channel_id() {
@@ -7446,16 +7449,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			None => continue,
 		}
 	}
-	if forward_htlcs_count > 0 {
-		// If we have pending HTLCs to forward, assume we either dropped a
-		// `PendingHTLCsForwardable` or the user received it but never processed it as they
-		// shut down before the timer hit. Either way, set the time_forwardable to a small
-		// constant as enough time has likely passed that we should simply handle the forwards
-		// now, or at least after the user gets a chance to reconnect to our peers.
-		pending_events_read.push(events::Event::PendingHTLCsForwardable {
-			time_forwardable: Duration::from_secs(2),
-		});
-	}
 
 	let background_event_count: u64 = Readable::read(reader)?;
 	let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -7566,10 +7559,44 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 					}
 				}
 			}
+			for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
+				if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+					// The ChannelMonitor is now responsible for this HTLC's
+					// failure/success and will let us know what its outcome is. If we
+					// still have an entry for this HTLC in `forward_htlcs`, we were
+					// apparently not persisted after the monitor was when forwarding
+					// the payment.
+					forward_htlcs.retain(|_, forwards| {
+						forwards.retain(|forward| {
+							if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+								if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
+									htlc_info.prev_htlc_id == prev_hop_data.htlc_id
+								{
+									log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+										log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+									false
+								} else { true }
+							} else { true }
+						});
+						!forwards.is_empty()
+					})
+				}
+			}
 		}
 	}
 
+	if !forward_htlcs.is_empty() {
+		// If we have pending HTLCs to forward, assume we either dropped a
+		// `PendingHTLCsForwardable` or the user received it but never processed it as they
+		// shut down before the timer hit. Either way, set the time_forwardable to a small
+		// constant as enough time has likely passed that we should simply handle the forwards
+		// now, or at least after the user gets a chance to reconnect to our peers.
+		pending_events_read.push(events::Event::PendingHTLCsForwardable {
+			time_forwardable: Duration::from_secs(2),
+		});
+	}
+
 	let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
 	let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
@@ -7755,7 +7782,8 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	for htlc_source in failed_htlcs.drain(..) {
 		let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 		let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
-		channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+		let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
+		channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 	}
 
 	//TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -8502,7 +8530,7 @@ pub mod bench {
 				$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
 
 				expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
-				expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
+				expect_payment_claimable!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
 				$node_b.claim_funds(payment_preimage);
 				expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
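The deserialization fix above prunes forward_htlcs (a map from scid to a list of pending forwards) with two nested retain calls: drop the forwards the ChannelMonitor already covers from each bucket, then drop buckets that became empty. A standalone sketch of that pattern with hypothetical stand-in types, not lightning's:

    use std::collections::HashMap;

    fn main() {
        // scid -> pending forwards; a stand-in for ChannelManager::forward_htlcs.
        let mut forward_htlcs: HashMap<u64, Vec<u64 /* htlc_id */>> = HashMap::new();
        forward_htlcs.insert(42, vec![1, 2]);
        forward_htlcs.insert(43, vec![2]);

        let stale_htlc_id = 2; // pretend the monitor already owns this HTLC's fate

        forward_htlcs.retain(|_, forwards| {
            // Inner retain: drop the forwards that would be replayed incorrectly.
            forwards.retain(|htlc_id| *htlc_id != stale_htlc_id);
            // Outer retain: keep the bucket only if something is left in it.
            !forwards.is_empty()
        });

        assert_eq!(forward_htlcs.get(&42), Some(&vec![1]));
        assert!(forward_htlcs.get(&43).is_none());
    }

Note that the PendingHTLCsForwardable replay event is now queued only after this pruning, keyed on whether anything actually remains in forward_htlcs, rather than on the serialized forward_htlcs_count as before.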