X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=5c4e5394375a4df4327e3ae1669ea1508a5a3217;hb=36e6023633ada2029bc5b6fbe37d96eb2eb6bd8e;hp=f27ca53bcf07a3c42334ee0b8cec05e681cdfc34;hpb=7809c5515c0bd7e3ae7a79c7634dd578b75d4386;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f27ca53b..5c4e5394 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -54,7 +54,7 @@ use crate::ln::wire::Encode;
 use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient};
 use crate::util::config::{UserConfig, ChannelConfig};
 use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::util::{byte_utils, events};
+use crate::util::events;
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
@@ -287,6 +287,16 @@ pub(super) enum HTLCFailReason {
 	}
 }
 
+impl HTLCFailReason {
+	pub(super) fn reason(failure_code: u16, data: Vec<u8>) -> Self {
+		Self::Reason { failure_code, data }
+	}
+
+	pub(super) fn from_failure_code(failure_code: u16) -> Self {
+		Self::Reason { failure_code, data: Vec::new() }
+	}
+}
+
 struct ReceiveError {
 	err_code: u16,
 	err_data: Vec<u8>,
@@ -736,9 +746,9 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	channel_state: Mutex<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
 
 	/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
-	/// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
+	/// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements
 	/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
-	/// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
+	/// after we generate a PaymentClaimable upon receipt of all MPP parts or when they time out.
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
@@ -1876,8 +1886,9 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 		if hop_data.amt_to_forward > amt_msat {
 			return Err(ReceiveError {
 				err_code: 19,
-				err_data: byte_utils::be64_to_array(amt_msat).to_vec(),
+				err_data: amt_msat.to_be_bytes().to_vec(),
 				msg: "Upstream node sent less than we were supposed to receive in payment",
 			});
 		}
@@ -2407,10 +2419,10 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 	fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
 		if route.paths.len() < 1 {
-			return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
+			return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over"}));
 		}
 		if payment_secret.is_none() && route.paths.len() > 1 {
 			return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
@@ -2596,12 +2608,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 			if path.len() < 1 || path.len() > 20 {
-				path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
+				path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size"}));
 				continue 'path_check;
 			}
 			for (idx, hop) in path.iter().enumerate() {
 				if idx != path.len() - 1 && hop.pubkey == our_node_id {
-					path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"}));
+					path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us"}));
 					continue 'path_check;
 				}
 			}
@@ -3078,15 +3090,22 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
-			Some(chan) => chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()),
-			None => return Err(APIError::APIMisuseError {
-				err: format!("Channel with id {:?} not found", next_hop_channel_id)
+			Some(chan) => {
+				if !chan.is_usable() {
+					return Err(APIError::ChannelUnavailable {
+						err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+					})
+				}
+				chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+			},
+			None => return Err(APIError::ChannelUnavailable {
+				err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id))
 			})
 		};
 
 		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
 			.ok_or_else(|| APIError::APIMisuseError {
-				err: format!("Payment with intercept id {:?} not found", intercept_id.0)
+				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 			})?;
 
 		let routing = match payment.forward_info.routing {
@@ -3121,7 +3140,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 						{
-							let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
+							let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
 							htlc_msat_height_data.extend_from_slice(
-								&byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
+								&self.best_block.read().unwrap().height().to_be_bytes(),
 							);
 							failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
 								short_channel_id: $htlc.prev_hop.short_channel_id,
@@ -3443,7 +3462,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 				macro_rules! check_total_value { ($payment_data: expr, $payment_preimage: expr) => {{
-					let mut payment_received_generated = false;
+					let mut payment_claimable_generated = false;
 					let purpose = || {
 						events::PaymentPurpose::InvoicePayment {
 							payment_preimage: $payment_preimage,
@@ -3496,7 +3515,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 	fn fail_holding_cell_htlcs(
 		&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
 		counterparty_node_id: &PublicKey
 	) {
-		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
-			let (failure_code, onion_failure_data) =
-				match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
-					hash_map::Entry::Occupied(chan_entry) => {
-						self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
-					},
-					hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
-				};
+		let (failure_code, onion_failure_data) =
+			match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
+				hash_map::Entry::Occupied(chan_entry) => {
+					self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+				},
+				hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+			};
+		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
+			let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
-			self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
+			self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
 		}
 	}
 
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
-	fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
 		#[cfg(debug_assertions)]
 		{
 			// Ensure that the `channel_state` lock is not held when calling this function.
@@ -3988,13 +4009,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
-			HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_params, .. } => {
+			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
 				let mut session_priv_bytes = [0; 32];
 				session_priv_bytes.copy_from_slice(&session_priv[..]);
 				let mut outbounds = self.pending_outbound_payments.lock().unwrap();
 				let mut all_paths_failed = false;
 				let mut full_failure_ev = None;
-				if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+				if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) {
 					if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
 						log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
 						return;
@@ -4007,7 +4028,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
-			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
+			HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
 				let err_packet = match onion_error {
-					HTLCFailReason::Reason { failure_code, data } => {
+					HTLCFailReason::Reason { ref failure_code, ref data } => {
 						log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
 						if let Some(phantom_ss) = phantom_shared_secret {
-							let phantom_packet = onion_utils::build_failure_packet(&phantom_ss, failure_code, &data[..]).encode();
-							let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(&phantom_ss, &phantom_packet);
-							onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
+							let phantom_packet = onion_utils::build_failure_packet(phantom_ss, *failure_code, &data[..]).encode();
+							let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(phantom_ss, &phantom_packet);
+							onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
 						} else {
-							let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
-							onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
+							let packet = onion_utils::build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]).encode();
+							onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &packet)
 						}
 					},
 					HTLCFailReason::LightningError { err } => {
 						log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
-						onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
+						onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &err.data)
 					}
 				};
@@ -4141,12 +4162,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 					hash_map::Entry::Occupied(mut entry) => {
-						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
+						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
 					},
 					hash_map::Entry::Vacant(entry) => {
-						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
+						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }));
 					}
 				}
 				mem::drop(forward_htlcs);
@@ -4158,13 +4179,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
 		}
@@ -4865,7 +4885,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 					Ok(update) => update,
 					Err(e) => try_chan_entry!(self, Err(e), chan),
 				};
@@ -4982,7 +5002,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -5233,7 +5254,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> {
 		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
@@ -5768,7 +5790,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, ()> {
 		inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
 	}
@@ -5885,7 +5907,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
+	#[cfg(test)]
+	pub(super) fn pop_pending_event(&self) -> Option<events::Event> {
+		let mut events = self.pending_events.lock().unwrap();
+		if events.is_empty() { None } else { Some(events.remove(0)) }
+	}
+
 	#[cfg(test)]
 	pub fn has_pending_payments(&self) -> bool {
 		!self.pending_outbound_payments.lock().unwrap().is_empty()
 	}
@@ -6174,9 +6202,8 @@ where
 			if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
 				for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
 					let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
-					timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
-						failure_code, data,
-					}, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+					timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
+						HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
 				}
 				if let Some(channel_ready) = channel_ready_opt {
 					send_channel_ready!(self, pending_msg_events, channel, channel_ready);
@@ -6260,12 +6287,12 @@ where
 					// number of blocks we generally consider it to take to do a commitment update,
 					// just give up on it and fail the HTLC.
 					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
-						let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-						htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
-							failure_code: 0x4000 | 15,
-							data: htlc_msat_height_data
-						}, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+						let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
+						htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());
+
+						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
+							HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
+							HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
 						false
 					} else { true }
 				});
@@ -6288,7 +6315,7 @@ where
 					_ => unreachable!(),
 				};
 				timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
-					HTLCFailReason::Reason { failure_code: 0x2000 | 2, data: Vec::new() },
+					HTLCFailReason::from_failure_code(0x2000 | 2),
 					HTLCDestination::InvalidForward { requested_forward_scid }));
 				log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
 				false
@@ -6299,7 +6326,7 @@ where
 		self.handle_init_event_channel_failures(failed_channels);
 
 		for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
-			self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
+			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
 		}
 	}
@@ -7397,6 +7424,25 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 						user_channel_id: channel.get_user_id(), reason: ClosureReason::OutdatedChannelManager
 					});
+				for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
+					let mut found_htlc = false;
+					for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
+						if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
+					}
+					if !found_htlc {
+						// If we have some HTLCs in the channel which are not present in the newer
+						// ChannelMonitor, they have been removed and should be failed back to
+						// ensure we don't forget them entirely. Note that if the missing HTLC(s)
+						// were actually claimed we'd have generated and ensured the previous-hop
+						// claim update ChannelMonitor updates were persisted prior to persisting
+						// the ChannelMonitor update for the forward leg, so attempting to fail the
+						// backwards leg of the HTLC will simply be rejected.
+						log_info!(args.logger,
+							"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
+							log_bytes!(payment_hash.0), log_bytes!(channel.channel_id()));
+						failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+					}
+				}
 			} else {
 				log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
 				if let Some(short_channel_id) = channel.get_short_channel_id() {
@@ -7477,16 +7523,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				None => continue,
 			}
 		}
-		if forward_htlcs_count > 0 {
-			// If we have pending HTLCs to forward, assume we either dropped a
-			// `PendingHTLCsForwardable` or the user received it but never processed it as they
-			// shut down before the timer hit. Either way, set the time_forwardable to a small
-			// constant as enough time has likely passed that we should simply handle the forwards
-			// now, or at least after the user gets a chance to reconnect to our peers.
-			pending_events_read.push(events::Event::PendingHTLCsForwardable {
-				time_forwardable: Duration::from_secs(2),
-			});
-		}
 		let background_event_count: u64 = Readable::read(reader)?;
 		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -7597,10 +7633,44 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				}
 			}
 		}
+			for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
+				if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+					// The ChannelMonitor is now responsible for this HTLC's
+					// failure/success and will let us know what its outcome is. If we
+					// still have an entry for this HTLC in `forward_htlcs`, we were
+					// apparently not persisted after the monitor was when forwarding
+					// the payment.
+					forward_htlcs.retain(|_, forwards| {
+						forwards.retain(|forward| {
+							if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+								if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
+									htlc_info.prev_htlc_id == prev_hop_data.htlc_id
+								{
+									log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+										log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+									false
+								} else { true }
+							} else { true }
+						});
+						!forwards.is_empty()
+					})
+				}
+			}
 		}
 	}
 }
 
+	if !forward_htlcs.is_empty() {
+		// If we have pending HTLCs to forward, assume we either dropped a
+		// `PendingHTLCsForwardable` or the user received it but never processed it as they
+		// shut down before the timer hit. Either way, set the time_forwardable to a small
+		// constant as enough time has likely passed that we should simply handle the forwards
+		// now, or at least after the user gets a chance to reconnect to our peers.
+		pending_events_read.push(events::Event::PendingHTLCsForwardable {
+			time_forwardable: Duration::from_secs(2),
+		});
+	}
+
 	let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
 	let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
 
@@ -7786,7 +7856,8 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 		for htlc_source in failed_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
-			channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
+			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
 
 		//TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -8533,7 +8604,7 @@ pub mod bench {
 				$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
 				expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
-				expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
+				expect_payment_claimable!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
 
 				$node_b.claim_funds(payment_preimage);
 				expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
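
Most hunks above funnel HTLC failures through the two constructors added near the top of the diff (HTLCFailReason::reason and HTLCFailReason::from_failure_code) instead of building HTLCFailReason::Reason { .. } literals at each call site. A minimal, self-contained sketch of that pattern, using a stand-in HtlcFailReason type rather than the real enum (which also carries a LightningError variant):

    // Stand-in for the enum in this diff; the real HTLCFailReason also has a
    // LightningError variant for pre-built failure packets.
    enum HtlcFailReason {
        Reason { failure_code: u16, data: Vec<u8> },
    }

    impl HtlcFailReason {
        // Failure with accompanying data, e.g. the htlc amount and block height
        // BOLT 4 requires for incorrect_or_unknown_payment_details.
        fn reason(failure_code: u16, data: Vec<u8>) -> Self {
            Self::Reason { failure_code, data }
        }

        // Failure identified by its code alone, e.g. 0x4000|8 (permanent_channel_failure).
        fn from_failure_code(failure_code: u16) -> Self {
            Self::Reason { failure_code, data: Vec::new() }
        }
    }

    fn main() {
        // Mirrors the timeout hunks: 8-byte amount then 4-byte height, big-endian.
        let (htlc_value_msat, height): (u64, u32) = (100_000, 750_000);
        let mut htlc_msat_height_data = htlc_value_msat.to_be_bytes().to_vec();
        htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());
        let _timed_out = HtlcFailReason::reason(0x4000 | 15, htlc_msat_height_data);
        let _perm = HtlcFailReason::from_failure_code(0x4000 | 8);
    }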
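Several hunks also migrate from the crate's byte_utils helpers to the to_be_bytes() methods the standard library has provided on integers since Rust 1.32, which is what lets the byte_utils import go away in the first hunk. A small sketch showing the replacement is byte-for-byte identical (the helper names here imitate the removed ones and are not the crate's actual definitions):

    // What byte_utils::be64_to_array/be32_to_array produced, expressed with the
    // std methods the diff migrates to.
    fn be64_to_array(v: u64) -> [u8; 8] { v.to_be_bytes() }
    fn be32_to_array(v: u32) -> [u8; 4] { v.to_be_bytes() }

    fn main() {
        assert_eq!(be64_to_array(0x0102_0304_0506_0708), [1, 2, 3, 4, 5, 6, 7, 8]);
        assert_eq!(be32_to_array(0x0102_0304), [1, 2, 3, 4]);
        // The failure-data layout built in several hunks above: 8-byte amount
        // followed by 4-byte height, both big-endian.
        let mut data = 42u64.to_be_bytes().to_vec();
        data.extend_from_slice(&100u32.to_be_bytes());
        assert_eq!(data.len(), 12);
    }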
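The failure codes threaded through these hunks (0x1000|7, 0x1000|14, 0x2000|2, 0x4000|10, 0x4000|15) follow BOLT 4's onion-failure flag scheme. A sketch of how the flag bits decompose; the constant names mirror the spec rather than identifiers from this file:

    // BOLT 4 onion-failure flag bits, as used by the codes in this diff.
    const PERM: u16 = 0x4000;   // permanent failure
    const NODE: u16 = 0x2000;   // failure at a node rather than a channel
    const UPDATE: u16 = 0x1000; // a channel_update is attached

    fn flags(code: u16) -> Vec<&'static str> {
        let mut out = Vec::new();
        if code & PERM != 0 { out.push("PERM"); }
        if code & NODE != 0 { out.push("NODE"); }
        if code & UPDATE != 0 { out.push("UPDATE"); }
        out
    }

    fn main() {
        assert_eq!(flags(0x1000 | 7), vec!["UPDATE"]);  // temporary_channel_failure
        assert_eq!(flags(0x1000 | 14), vec!["UPDATE"]); // expiry_too_soon
        assert_eq!(flags(0x2000 | 2), vec!["NODE"]);    // temporary_node_failure
        assert_eq!(flags(0x4000 | 10), vec!["PERM"]);   // unknown_next_peer
        assert_eq!(flags(0x4000 | 15), vec!["PERM"]);   // incorrect_or_unknown_payment_details
    }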
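Finally, the deserialization hunks reconcile forward_htlcs against each ChannelMonitor: a pending forward whose previous hop matches an HTLC the monitor already tracks as outbound is dropped, since the monitor now owns its outcome. A sketch of that double-retain pruning under stand-in types (PrevHopData, AddHtlc, and prune are hypothetical, reduced to the fields the comparison actually uses):

    use std::collections::HashMap;

    struct PrevHopData { short_channel_id: u64, htlc_id: u64 }
    struct AddHtlc { prev_short_channel_id: u64, prev_htlc_id: u64 }

    // Drop any pending forward the monitor already owns: a matching previous-hop
    // scid + htlc_id means the HTLC was already forwarded before shutdown.
    fn prune(forward_htlcs: &mut HashMap<u64, Vec<AddHtlc>>, prev: &PrevHopData) {
        forward_htlcs.retain(|_, forwards| {
            forwards.retain(|f| {
                !(f.prev_short_channel_id == prev.short_channel_id && f.prev_htlc_id == prev.htlc_id)
            });
            // Remove the outgoing-scid entry entirely once its list is empty.
            !forwards.is_empty()
        });
    }

    fn main() {
        let mut forwards = HashMap::new();
        forwards.insert(42u64, vec![AddHtlc { prev_short_channel_id: 7, prev_htlc_id: 3 }]);
        prune(&mut forwards, &PrevHopData { short_channel_id: 7, htlc_id: 3 });
        assert!(forwards.is_empty()); // the already-forwarded HTLC is gone
    }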