X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=0506a81c5192e945bed3f4418b0a9e6efe97cb86;hb=271103d66017e4622d42edc3aa8a3b850b104e3e;hp=db2e160430313b1dd3a80e12b1267ba8006bafd3;hpb=9d803a92bdb3eb4270cce869eb19630bc03a5d36;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index db2e16043..0506a81c5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -48,7 +48,7 @@ use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFea #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::Bolt11InvoiceFeatures; use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router}; -use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails}; +use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails}; use crate::ln::msgs; use crate::ln::onion_utils; use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING}; @@ -202,15 +202,16 @@ pub struct BlindedForward { /// onion payload if we're the introduction node. Useful for calculating the next hop's /// [`msgs::UpdateAddHTLC::blinding_point`]. pub inbound_blinding_point: PublicKey, - // Another field will be added here when we support forwarding as a non-intro node. + /// If needed, this determines how this HTLC should be failed backwards, based on whether we are + /// the introduction node. + pub failure: BlindedFailure, } impl PendingHTLCRouting { // Used to override the onion failure code and data if the HTLC is blinded. fn blinded_failure(&self) -> Option { - // TODO: needs update when we support forwarding blinded HTLCs as non-intro node match self { - Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode), + Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure), Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode), _ => None, } @@ -305,10 +306,15 @@ pub(super) enum HTLCForwardInfo { }, } -// Used for failing blinded HTLCs backwards correctly. +/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node, +/// which determines the failure message that should be used. #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -enum BlindedFailure { +pub enum BlindedFailure { + /// This HTLC is being failed backwards by the introduction node, and thus should be failed with + /// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`. FromIntroductionNode, + /// This HTLC is being failed backwards by a blinded node within the path, and thus should be + /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`. 
FromBlindedNode, } @@ -2884,6 +2890,7 @@ where reason: shutdown_res.closure_reason, counterparty_node_id: Some(shutdown_res.counterparty_node_id), channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), + channel_funding_txo: shutdown_res.channel_funding_txo, }, None)); if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { @@ -3024,8 +3031,9 @@ where let is_intro_node_forward = match next_hop { onion_utils::Hop::Forward { - // TODO: update this when we support blinded forwarding as non-intro node - next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, .. + next_hop_data: msgs::InboundOnionPayload::BlindedForward { + intro_node_blinding_point: Some(_), .. + }, .. } => true, _ => false, }; @@ -3234,14 +3242,14 @@ where // delay) once they've send us a commitment_signed! PendingHTLCStatus::Forward(info) }, - Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) } }, onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => { match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac, new_packet_bytes, shared_secret, next_packet_pubkey_opt) { Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) } } } @@ -3976,6 +3984,7 @@ where }); } } + mem::drop(funding_batch_states); for shutdown_result in shutdown_results.drain(..) { self.finish_close_channel(shutdown_result); } @@ -4309,7 +4318,7 @@ where current_height, self.default_configuration.accept_mpp_keysend) { Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), - Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) + Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, _ => panic!(), @@ -4370,7 +4379,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, // Phantom payments are only PendingHTLCRouting::Receive. 
phantom_shared_secret: None, - blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode), + blinded_failure: blinded.map(|b| b.failure), }); let next_blinding_point = blinded.and_then(|b| { let encrypted_tlvs_ss = self.node_signer.ecdh( @@ -6005,10 +6014,10 @@ where let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { - let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id); + let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id); log_error!(logger, "{}", err_str); - APIError::ChannelUnavailable { err: err_str } + APIError::ChannelUnavailable { err: err_str } })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6031,7 +6040,7 @@ where APIError::ChannelUnavailable { err: err_str } }) } - _ => { + _ => { let err_str = "No such channel awaiting to be accepted.".to_owned(); log_error!(logger, "{}", err_str); @@ -9021,8 +9030,6 @@ where } fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - match &msg.data as &str { "cannot co-op close channel w/ active htlcs"| "link failed to shutdown" => @@ -9035,34 +9042,45 @@ where // We're not going to bother handling this in a sensible way, instead simply // repeating the Shutdown message on repeat until morale improves. if !msg.channel_id.is_zero() { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); - if peer_state_mutex_opt.is_none() { return; } - let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) { - if let Some(msg) = chan.get_outbound_shutdown() { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg, - }); - } - peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: *counterparty_node_id, - action: msgs::ErrorAction::SendWarningMessage { - msg: msgs::WarningMessage { - channel_id: msg.channel_id, - data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned() - }, - log_level: Level::Trace, + PersistenceNotifierGuard::optionally_notify( + self, + || -> NotifyOption { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; } + let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) { + if let Some(msg) = chan.get_outbound_shutdown() { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } + peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: *counterparty_node_id, + action: msgs::ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { + channel_id: msg.channel_id, + data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned() + }, + log_level: Level::Trace, + } + }); + // This can happen in a fairly 
tight loop, so we absolutely cannot trigger + // a `ChannelManager` write here. + return NotifyOption::SkipPersistHandleEvents; } - }); - } + NotifyOption::SkipPersistNoEvents + } + ); } return; } _ => {} } + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + if msg.channel_id.is_zero() { let channel_ids: Vec = { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -9350,6 +9368,7 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features.set_channel_type_optional(); features.set_scid_privacy_optional(); features.set_zero_conf_optional(); + features.set_route_blinding_optional(); if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx { features.set_anchors_zero_fee_htlc_tx_optional(); } @@ -9495,6 +9514,7 @@ impl_writeable_tlv_based!(PhantomRouteHints, { impl_writeable_tlv_based!(BlindedForward, { (0, inbound_blinding_point, required), + (1, failure, (default_value, BlindedFailure::FromIntroductionNode)), }); impl_writeable_tlv_based_enum!(PendingHTLCRouting, @@ -10327,6 +10347,7 @@ where reason: ClosureReason::OutdatedChannelManager, counterparty_node_id: Some(channel.context.get_counterparty_node_id()), channel_capacity_sats: Some(channel.context.get_value_satoshis()), + channel_funding_txo: channel.context.get_funding_txo(), }, None)); for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { let mut found_htlc = false; @@ -10380,6 +10401,7 @@ where reason: ClosureReason::DisconnectedPeer, counterparty_node_id: Some(channel.context.get_counterparty_node_id()), channel_capacity_sats: Some(channel.context.get_value_satoshis()), + channel_funding_txo: channel.context.get_funding_txo(), }, None)); } else { log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id()); @@ -12119,8 +12141,8 @@ mod tests { let sender_intended_amt_msat = 100; let extra_fee_msat = 10; let hop_data = msgs::InboundOnionPayload::Receive { - amt_msat: 100, - outgoing_cltv_value: 42, + sender_intended_htlc_amt_msat: 100, + cltv_expiry_height: 42, payment_metadata: None, keysend_preimage: None, payment_data: Some(msgs::FinalOnionHopData { @@ -12131,7 +12153,7 @@ mod tests { // Check that if the amount we received + the penultimate hop extra fee is less than the sender // intended amount, we fail the payment. let current_height: u32 = node[0].node.best_block.read().unwrap().height(); - if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) = + if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) = create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat), current_height, node[0].node.default_configuration.accept_mpp_keysend) @@ -12141,8 +12163,8 @@ mod tests { // If amt_received + extra_fee is equal to the sender intended amount, we're fine. 
let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone - amt_msat: 100, - outgoing_cltv_value: 42, + sender_intended_htlc_amt_msat: 100, + cltv_expiry_height: 42, payment_metadata: None, keysend_preimage: None, payment_data: Some(msgs::FinalOnionHopData { @@ -12165,8 +12187,8 @@ mod tests { let current_height: u32 = node[0].node.best_block.read().unwrap().height(); let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive { - amt_msat: 100, - outgoing_cltv_value: 22, + sender_intended_htlc_amt_msat: 100, + cltv_expiry_height: 22, payment_metadata: None, keysend_preimage: None, payment_data: Some(msgs::FinalOnionHopData {
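
For reference, below is a minimal, self-contained Rust sketch of the behaviour the new `BlindedFailure` enum and the `failure` field on `BlindedForward` are meant to encode, per the doc comments added in this diff: the introduction node fails a blinded HTLC backwards with `msgs::UpdateFailHTLC`, while a node inside the blinded path fails it with `msgs::UpdateFailMalformedHTLC`, both using error code `0x8000|0x4000|24` (`INVALID_ONION_BLINDING`). The type and function names below are illustrative stand-ins only, not rust-lightning's actual internals.

// Sketch only: simplified stand-ins for the real rust-lightning types.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum BlindedFailure {
    FromIntroductionNode,
    FromBlindedNode,
}

// BOLT 4 `invalid_onion_blinding` failure code: PERM | NODE | 24.
const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;

// Hypothetical stand-ins for the two messages sent backwards on failure.
#[derive(Debug)]
enum FailureMsg {
    // Corresponds to `msgs::UpdateFailHTLC` (encrypted failure onion).
    UpdateFailHtlc { err_code: u16 },
    // Corresponds to `msgs::UpdateFailMalformedHTLC` (plaintext failure code).
    UpdateFailMalformedHtlc { failure_code: u16 },
}

// Maps the stored BlindedFailure to the message type used when failing backwards,
// mirroring the doc comments on `FromIntroductionNode` / `FromBlindedNode` above.
fn failure_message_for(blinded_failure: BlindedFailure) -> FailureMsg {
    match blinded_failure {
        BlindedFailure::FromIntroductionNode =>
            FailureMsg::UpdateFailHtlc { err_code: INVALID_ONION_BLINDING },
        BlindedFailure::FromBlindedNode =>
            FailureMsg::UpdateFailMalformedHtlc { failure_code: INVALID_ONION_BLINDING },
    }
}

fn main() {
    println!("{:?}", failure_message_for(BlindedFailure::FromIntroductionNode));
    println!("{:?}", failure_message_for(BlindedFailure::FromBlindedNode));
}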