X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=0506a81c5192e945bed3f4418b0a9e6efe97cb86;hb=271103d66017e4622d42edc3aa8a3b850b104e3e;hp=9f3a3f425fb8728789551bdf7dec011200266c17;hpb=62d52c6020830385844943de094d85a330c456df;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 9f3a3f425..0506a81c5 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -202,15 +202,16 @@ pub struct BlindedForward {
 	/// onion payload if we're the introduction node. Useful for calculating the next hop's
 	/// [`msgs::UpdateAddHTLC::blinding_point`].
 	pub inbound_blinding_point: PublicKey,
-	// Another field will be added here when we support forwarding as a non-intro node.
+	/// If needed, this determines how this HTLC should be failed backwards, based on whether we are
+	/// the introduction node.
+	pub failure: BlindedFailure,
 }
 
 impl PendingHTLCRouting {
 	// Used to override the onion failure code and data if the HTLC is blinded.
 	fn blinded_failure(&self) -> Option<BlindedFailure> {
-		// TODO: needs update when we support forwarding blinded HTLCs as non-intro node
 		match self {
-			Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode),
+			Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
 			Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
 			_ => None,
 		}
@@ -305,10 +306,15 @@ pub(super) enum HTLCForwardInfo {
 	},
 }
 
-// Used for failing blinded HTLCs backwards correctly.
+/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
+/// which determines the failure message that should be used.
 #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
-enum BlindedFailure {
+pub enum BlindedFailure {
+	/// This HTLC is being failed backwards by the introduction node, and thus should be failed with
+	/// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`.
 	FromIntroductionNode,
+	/// This HTLC is being failed backwards by a blinded node within the path, and thus should be
+	/// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`.
 	FromBlindedNode,
 }
 
@@ -2884,6 +2890,7 @@ where
 				reason: shutdown_res.closure_reason,
 				counterparty_node_id: Some(shutdown_res.counterparty_node_id),
 				channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+				channel_funding_txo: shutdown_res.channel_funding_txo,
 			}, None));
 
 		if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
@@ -3024,8 +3031,9 @@ where
 
 		let is_intro_node_forward = match next_hop {
 			onion_utils::Hop::Forward {
-				// TODO: update this when we support blinded forwarding as non-intro node
-				next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+				next_hop_data: msgs::InboundOnionPayload::BlindedForward {
+					intro_node_blinding_point: Some(_), ..
+				}, ..
 			} => true,
 			_ => false,
 		};
@@ -3976,6 +3984,7 @@ where
 				});
 			}
 		}
+		mem::drop(funding_batch_states);
 		for shutdown_result in shutdown_results.drain(..) {
 			self.finish_close_channel(shutdown_result);
 		}
@@ -4370,7 +4379,7 @@ where
 									incoming_packet_shared_secret: incoming_shared_secret,
 									// Phantom payments are only PendingHTLCRouting::Receive.
 									phantom_shared_secret: None,
-									blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+									blinded_failure: blinded.map(|b| b.failure),
 								});
 								let next_blinding_point = blinded.and_then(|b| {
 									let encrypted_tlvs_ss = self.node_signer.ecdh(
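The hunks above thread a per-forward `BlindedFailure` through `BlindedForward`, `PendingHTLCRouting::blinded_failure()`, and the HTLC's previous-hop data, replacing the earlier assumption that every blinded forward is failed by the introduction node. The sketch below illustrates the resulting dispatch; aside from the `BlindedFailure` variant names and the BOLT 4 `invalid_onion_blinding` code `0x8000|0x4000|24` quoted in the enum docs, the types here are hypothetical stand-ins, not LDK's API:

```rust
// BOLT 4's invalid_onion_blinding failure code: BADONION | PERM | 24.
const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum BlindedFailure {
    FromIntroductionNode,
    FromBlindedNode,
}

// Stand-in for the two failure messages named in the enum docs above.
#[derive(Debug, PartialEq)]
enum BackwardsFailure {
    // update_fail_htlc: carries an encrypted failure packet.
    UpdateFail { code: u16 },
    // update_fail_malformed_htlc: carries only a failure code (and a hash).
    UpdateFailMalformed { code: u16 },
}

fn failure_message(failure: BlindedFailure) -> BackwardsFailure {
    match failure {
        // The introduction node converts blinded-path failures into an
        // ordinary encrypted onion error for the unblinded upstream hops.
        BlindedFailure::FromIntroductionNode =>
            BackwardsFailure::UpdateFail { code: INVALID_ONION_BLINDING },
        // A node inside the blinded path must reveal nothing about what
        // failed, so it can only send a malformed-HTLC message.
        BlindedFailure::FromBlindedNode =>
            BackwardsFailure::UpdateFailMalformed { code: INVALID_ONION_BLINDING },
    }
}

fn main() {
    assert_eq!(
        failure_message(BlindedFailure::FromBlindedNode),
        BackwardsFailure::UpdateFailMalformed { code: 0x8000 | 0x4000 | 24 },
    );
}
```

The asymmetry is deliberate: a node inside a blinded path cannot build a meaningful encrypted failure onion without leaking information about the path, so it reports `invalid_onion_blinding` via a malformed-HTLC message, which the introduction node then translates for the unblinded upstream hops.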
@@ -9021,8 +9030,6 @@ where
 	}
 
 	fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-
 		match &msg.data as &str {
 			"cannot co-op close channel w/ active htlcs"|
 			"link failed to shutdown" =>
@@ -9035,34 +9042,45 @@ where
 				// We're not going to bother handling this in a sensible way, instead simply
 				// repeating the Shutdown message on repeat until morale improves.
 				if !msg.channel_id.is_zero() {
-					let per_peer_state = self.per_peer_state.read().unwrap();
-					let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
-					if peer_state_mutex_opt.is_none() { return; }
-					let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
-					if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
-						if let Some(msg) = chan.get_outbound_shutdown() {
-							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-								node_id: *counterparty_node_id,
-								msg,
-							});
-						}
-						peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-							node_id: *counterparty_node_id,
-							action: msgs::ErrorAction::SendWarningMessage {
-								msg: msgs::WarningMessage {
-									channel_id: msg.channel_id,
-									data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
-								},
-								log_level: Level::Trace,
+					PersistenceNotifierGuard::optionally_notify(
+						self,
+						|| -> NotifyOption {
+							let per_peer_state = self.per_peer_state.read().unwrap();
+							let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+							if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
+							let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+							if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
+								if let Some(msg) = chan.get_outbound_shutdown() {
+									peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+										node_id: *counterparty_node_id,
+										msg,
+									});
+								}
+								peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+									node_id: *counterparty_node_id,
+									action: msgs::ErrorAction::SendWarningMessage {
+										msg: msgs::WarningMessage {
+											channel_id: msg.channel_id,
+											data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+										},
+										log_level: Level::Trace,
+									}
+								});
+								// This can happen in a fairly tight loop, so we absolutely cannot trigger
+								// a `ChannelManager` write here.
+								return NotifyOption::SkipPersistHandleEvents;
 							}
-						});
-					}
+							NotifyOption::SkipPersistNoEvents
+						}
+					);
 				}
 				return;
 			}
 			_ => {}
 		}
 
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
 		if msg.channel_id.is_zero() {
 			let channel_ids: Vec<ChannelId> = {
 				let per_peer_state = self.per_peer_state.read().unwrap();
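The `handle_error` hunks above make two coordinated changes: the unconditional `PersistenceNotifierGuard::notify_on_drop(self)` moves below the LND-bug-6039 workaround, and the workaround itself runs under `PersistenceNotifierGuard::optionally_notify`, returning `SkipPersistHandleEvents` or `SkipPersistNoEvents` so a peer stuck in a shutdown-spam loop can no longer force a full `ChannelManager` write per error message. A minimal sketch of that pattern, using hypothetical stand-in types rather than LDK's actual guard:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Stand-ins only: LDK's PersistenceNotifierGuard / NotifyOption differ in
// detail. The point of the pattern is that no persistence guard is taken up
// front; the handler reports after the fact whether anything it did requires
// waking the persister, the event processor, or neither.
enum NotifyOption {
    DoPersist,               // state changed: persist and process events
    SkipPersistHandleEvents, // messages/events queued, but no durable state change
    SkipPersistNoEvents,     // nothing observable happened
}

struct Notifier {
    needs_persist: AtomicBool,
    needs_event_processing: AtomicBool,
}

impl Notifier {
    fn optionally_notify<F: FnOnce() -> NotifyOption>(&self, handler: F) {
        match handler() {
            NotifyOption::DoPersist => {
                self.needs_persist.store(true, Ordering::Release);
                self.needs_event_processing.store(true, Ordering::Release);
            }
            NotifyOption::SkipPersistHandleEvents => {
                self.needs_event_processing.store(true, Ordering::Release);
            }
            NotifyOption::SkipPersistNoEvents => {}
        }
    }
}

fn main() {
    let n = Notifier {
        needs_persist: AtomicBool::new(false),
        needs_event_processing: AtomicBool::new(false),
    };
    // Queue a warning for a misbehaving peer without forcing persistence,
    // mirroring the SkipPersistHandleEvents return in the diff above.
    n.optionally_notify(|| NotifyOption::SkipPersistHandleEvents);
    assert!(!n.needs_persist.load(Ordering::Acquire));
    assert!(n.needs_event_processing.load(Ordering::Acquire));
}
```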
@@ -9350,6 +9368,7 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 	features.set_channel_type_optional();
 	features.set_scid_privacy_optional();
 	features.set_zero_conf_optional();
+	features.set_route_blinding_optional();
 	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
 		features.set_anchors_zero_fee_htlc_tx_optional();
 	}
@@ -9495,6 +9514,7 @@ impl_writeable_tlv_based!(PhantomRouteHints, {
 
 impl_writeable_tlv_based!(BlindedForward, {
 	(0, inbound_blinding_point, required),
+	(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
 });
 
 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
@@ -10327,6 +10347,7 @@ where
 						reason: ClosureReason::OutdatedChannelManager,
 						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
 						channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+						channel_funding_txo: channel.context.get_funding_txo(),
 					}, None));
 				for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
 					let mut found_htlc = false;
@@ -10380,6 +10401,7 @@ where
 						reason: ClosureReason::DisconnectedPeer,
 						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
 						channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+						channel_funding_txo: channel.context.get_funding_txo(),
 					}, None));
 				} else {
 					log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
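The serialization hunk above is what keeps the new `BlindedForward::failure` field backwards compatible: it is written at odd TLV type `1` with a `default_value` of `FromIntroductionNode`, so objects serialized before the field existed, when intro-node forwarding was the only possibility, still deserialize to the old behavior. A toy decoder with a simplified, hypothetical record encoding (LDK's real TLV streams are byte-serialized, not `(u64, u8)` pairs) shows the absent-means-default behavior:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum BlindedFailure {
    FromIntroductionNode,
    FromBlindedNode,
}

// Hypothetical TLV records as (type, value) pairs. Type 1 is the odd
// (optional) type holding `failure` in the diff above.
fn failure_from_tlvs(tlvs: &[(u64, u8)]) -> BlindedFailure {
    match tlvs.iter().find(|(t, _)| *t == 1) {
        // A type-1 record selecting the new variant...
        Some((_, 1)) => BlindedFailure::FromBlindedNode,
        // ...otherwise, including when the record is entirely absent (data
        // written before this field existed), fall back to the default.
        _ => BlindedFailure::FromIntroductionNode,
    }
}

fn main() {
    // Old serialization: only the required type-0 record is present.
    assert_eq!(failure_from_tlvs(&[(0, 2)]), BlindedFailure::FromIntroductionNode);
    // New serialization opting into the blinded-node failure behavior.
    assert_eq!(failure_from_tlvs(&[(0, 2), (1, 1)]), BlindedFailure::FromBlindedNode);
}
```

In LDK's TLV convention even types are required and odd types may be ignored by readers that do not know them, which is why a new field with a sensible default takes the next odd type number.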