diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 9c559909..c9f431b0 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -31,8 +31,8 @@ use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::{secp256k1, Sequence};
 
-use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
+use crate::blinded_path::{BlindedPath, NodeIdLookUp};
+use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, ReceiveTlvs};
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@ -61,7 +61,6 @@ use crate::ln::wire::Encode;
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
 use crate::offers::invoice_error::InvoiceError;
 use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
-use crate::offers::merkle::SignError;
 use crate::offers::offer::{Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
@@ -156,6 +155,11 @@ pub enum PendingHTLCRouting {
		/// [`Event::PaymentClaimable::onion_fields`] as
		/// [`RecipientOnionFields::payment_metadata`].
		payment_metadata: Option<Vec<u8>>,
+		/// The context of the payment included by the recipient in a blinded path, or `None` if a
+		/// blinded path was not used.
+		///
+		/// Used in part to determine the [`events::PaymentPurpose`].
+		payment_context: Option<PaymentContext>,
		/// CLTV expiry of the received HTLC.
		///
		/// Used to track when we should expire pending HTLCs that go unclaimed.
@@ -353,6 +357,11 @@ enum OnionPayload {
		/// This is only here for backwards-compatibility in serialization, in the future it can be
		/// removed, breaking clients running 0.0.106 and earlier.
		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
+		/// The context of the payment included by the recipient in a blinded path, or `None` if a
+		/// blinded path was not used.
+		///
+		/// Used in part to determine the [`events::PaymentPurpose`].
+		payment_context: Option<PaymentContext>,
	},
	/// Contains the payer-provided preimage.
	Spontaneous(PaymentPreimage),
@@ -903,7 +912,7 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
	/// The peer is currently connected (i.e. we've seen a
	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
	/// [`ChannelMessageHandler::peer_disconnected`]).
- is_connected: bool, + pub is_connected: bool, } impl PeerState where SP::Target: SignerProvider { @@ -918,9 +927,9 @@ impl PeerState where SP::Target: SignerProvider { match phase { ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true, ChannelPhase::UnfundedInboundV1(_) => false, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(_) => true, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(_) => false, } ) @@ -1455,12 +1464,12 @@ where /// // On the event processing thread /// channel_manager.process_pending_events(&|event| match event { /// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose { -/// PaymentPurpose::InvoicePayment { payment_preimage: Some(payment_preimage), .. } => { +/// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => { /// assert_eq!(payment_hash, known_payment_hash); /// println!("Claiming payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, -/// PaymentPurpose::InvoicePayment { payment_preimage: None, .. } => { +/// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => { /// println!("Unknown payment hash: {}", payment_hash); /// }, /// PaymentPurpose::SpontaneousPayment(payment_preimage) => { @@ -1468,6 +1477,8 @@ where /// println!("Claiming spontaneous payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, +/// // ... +/// # _ => {}, /// }, /// Event::PaymentClaimed { payment_hash, amount_msat, .. } => { /// assert_eq!(payment_hash, known_payment_hash); @@ -1543,11 +1554,12 @@ where /// # fn example(channel_manager: T) -> Result<(), Bolt12SemanticError> { /// # let channel_manager = channel_manager.get_cm(); /// let offer = channel_manager -/// .create_offer_builder("coffee".to_string())? +/// .create_offer_builder()? /// # ; /// # // Needed for compiling for c_bindings /// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into(); /// # let offer = builder +/// .description("coffee".to_string()) /// .amount_msats(10_000_000) /// .build()?; /// let bech32_offer = offer.to_string(); @@ -1555,11 +1567,11 @@ where /// // On the event processing thread /// channel_manager.process_pending_events(&|event| match event { /// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose { -/// PaymentPurpose::InvoicePayment { payment_preimage: Some(payment_preimage), .. } => { +/// PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => { /// println!("Claiming payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, -/// PaymentPurpose::InvoicePayment { payment_preimage: None, .. } => { +/// PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => { /// println!("Unknown payment hash: {}", payment_hash); /// }, /// // ... @@ -1646,13 +1658,13 @@ where /// let payment_id = PaymentId([42; 32]); /// let refund = channel_manager /// .create_refund_builder( -/// "coffee".to_string(), amount_msats, absolute_expiry, payment_id, retry, -/// max_total_routing_fee_msat +/// amount_msats, absolute_expiry, payment_id, retry, max_total_routing_fee_msat /// )? 
/// # ;
/// # // Needed for compiling for c_bindings
/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
/// # let refund = builder
+///     .description("coffee".to_string())
///     .payer_note("refund for order 1234".to_string())
///     .build()?;
/// let bech32_refund = refund.to_string();
@@ -1695,25 +1707,31 @@ where
/// #
/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
/// # let channel_manager = channel_manager.get_cm();
-/// match channel_manager.request_refund_payment(refund) {
-///     Ok(()) => println!("Requesting payment for refund"),
-///     Err(e) => println!("Unable to request payment for refund: {:?}", e),
-/// }
+/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
+///     Ok(invoice) => {
+///         let payment_hash = invoice.payment_hash();
+///         println!("Requesting refund payment {}", payment_hash);
+///         payment_hash
+///     },
+///     Err(e) => panic!("Unable to request payment for refund: {:?}", e),
+/// };
///
/// // On the event processing thread
/// channel_manager.process_pending_events(&|event| match event {
///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
-///         PaymentPurpose::InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
+///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
+///             assert_eq!(payment_hash, known_payment_hash);
///             println!("Claiming payment {}", payment_hash);
///             channel_manager.claim_funds(payment_preimage);
///         },
-///         PaymentPurpose::InvoicePayment { payment_preimage: None, .. } => {
+///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
///             println!("Unknown payment hash: {}", payment_hash);
///         },
///         // ...
/// #       _ => {},
///     },
///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+///         assert_eq!(payment_hash, known_payment_hash);
///         println!("Claimed {} msats", amount_msat);
///     },
///     // ...
@@ -1828,6 +1846,8 @@ where
// |		|
// |		|__`pending_intercepted_htlcs`
// |
+// |__`decode_update_add_htlcs`
+// |
// |__`per_peer_state`
// |
// |__`pending_inbound_payments`
@@ -1918,6 +1938,18 @@ where
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,

+	/// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
+	///
+	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+	/// and via the classic SCID.
+	///
+	/// Note that no consistency guarantees are made about the existence of a channel with the
+	/// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
+	///
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
+	decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
+
	/// The sets of payments which are claimable or currently being claimed. See
	/// [`ClaimablePayments`]' individual field docs for more info.
	///
@@ -2061,6 +2093,9 @@ where
	pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,

+	/// Tracks the message events that are to be broadcasted when we are connected to some peer.
+	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
+
	entropy_source: ES,
	node_signer: NS,
	signer_provider: SP,
@@ -2652,7 +2687,7 @@ macro_rules! handle_error {
		match $internal {
			Ok(msg) => Ok(msg),
			Err(MsgHandleErrInternal { err, shutdown_finish, ..
}) => { - let mut msg_events = Vec::with_capacity(2); + let mut msg_event = None; if let Some((shutdown_res, update_option)) = shutdown_finish { let counterparty_node_id = shutdown_res.counterparty_node_id; @@ -2664,7 +2699,8 @@ macro_rules! handle_error { $self.finish_close_channel(shutdown_res); if let Some(update) = update_option { - msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -2674,17 +2710,17 @@ macro_rules! handle_error { if let msgs::ErrorAction::IgnoreError = err.action { } else { - msg_events.push(events::MessageSendEvent::HandleError { + msg_event = Some(events::MessageSendEvent::HandleError { node_id: $counterparty_node_id, action: err.action.clone() }); } - if !msg_events.is_empty() { + if let Some(msg_event) = msg_event { let per_peer_state = $self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); - peer_state.pending_msg_events.append(&mut msg_events); + peer_state.pending_msg_events.push(msg_event); } } @@ -2756,11 +2792,11 @@ macro_rules! convert_chan_phase_err { ChannelPhase::UnfundedInboundV1(channel) => { convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL) }, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(channel) => { convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL) }, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(channel) => { convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL) }, @@ -2885,9 +2921,9 @@ macro_rules! handle_monitor_update_completion { let update_actions = $peer_state.monitor_update_blocked_actions .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); - let htlc_forwards = $self.handle_channel_resumption( + let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, - updates.commitment_update, updates.order, updates.accepted_htlcs, + updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); if let Some(upd) = channel_update { @@ -2948,6 +2984,9 @@ macro_rules! handle_monitor_update_completion { if let Some(forwards) = htlc_forwards { $self.forward_htlcs(&mut [forwards][..]); } + if let Some(decode) = decode_update_add_htlcs { + $self.push_decode_update_add_htlcs(decode); + } $self.finalize_claims(updates.finalized_claimed_htlcs); for failure in updates.failed_htlcs.drain(..) 
{
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
@@ -3124,6 +3163,7 @@ where
			pending_inbound_payments: Mutex::new(new_hash_map()),
			pending_outbound_payments: OutboundPayments::new(),
			forward_htlcs: Mutex::new(new_hash_map()),
+			decode_update_add_htlcs: Mutex::new(new_hash_map()),
			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
			pending_intercepted_htlcs: Mutex::new(new_hash_map()),
			outpoint_to_peer: Mutex::new(new_hash_map()),
@@ -3151,6 +3191,7 @@ where
			funding_batch_states: Mutex::new(BTreeMap::new()),

			pending_offers_messages: Mutex::new(Vec::new()),
+			pending_broadcast_messages: Mutex::new(Vec::new()),

			entropy_source,
			node_signer,
@@ -3630,8 +3671,8 @@ where
				// Unfunded channel has no update
				(None, chan_phase.context().get_counterparty_node_id())
			},
-			// TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
-			#[cfg(dual_funding)]
+			// TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+			#[cfg(any(dual_funding, splicing))]
			ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
				self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
				// Unfunded channel has no update
@@ -3649,17 +3690,11 @@ where
			}
		};
		if let Some(update) = update_opt {
-			// Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
-			// not try to broadcast it via whatever peer we have.
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			let a_peer_state_opt = per_peer_state.get(peer_node_id)
-				.ok_or(per_peer_state.values().next());
-			if let Ok(a_peer_state_mutex) = a_peer_state_opt {
-				let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
-				a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-					msg: update
-				});
-			}
+			// If we have some Channel Update to broadcast, we cache it and broadcast it later.
+			let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+			pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+				msg: update
+			});
		}

		Ok(counterparty_node_id)
@@ -3723,6 +3758,163 @@ where
		}
	}

+	fn can_forward_htlc_to_outgoing_channel(
+		&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
+	) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+		if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+			// Note that the behavior here should be identical to the above block - we
+			// should NOT reveal the existence or non-existence of a private channel if
+			// we don't allow forwards outbound over them.
+			return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+		}
+		if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
+			// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+			// "refuse to forward unless the SCID alias was used", so we pretend
+			// we don't have the channel here.
+			return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+		}
+
+		// Note that we could technically not return an error yet here and just hope
+		// that the connection is reestablished or monitor updated by the time we get
+		// around to doing the actual forward, but better to fail early if we can and
+		// hopefully an attacker trying to path-trace payments cannot make this occur
+		// on a small/per-node/per-channel scale.
+		if !chan.context.is_live() { // channel_disabled
+			// If the channel_update we're going to return is disabled (i.e. the
+			// peer has been disabled for some time), return `channel_disabled`,
+			// otherwise return `temporary_channel_failure`.
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+				return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+			} else {
+				return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+			}
+		}
+		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+		}
+		if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			return Err((err, code, chan_update_opt));
+		}
+
+		Ok(())
+	}
+
+	/// Executes a callback `C` that returns some value `X` on the channel found with the given
+	/// `scid`. `None` is returned when the channel is not found.
+	fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
+		&self, scid: u64, callback: C,
+	) -> Option<X> {
+		let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
+			None => return None,
+			Some((cp_id, id)) => (cp_id, id),
+		};
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		if peer_state_mutex_opt.is_none() {
+			return None;
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.get_mut(&channel_id).and_then(
+			|chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+		) {
+			None => None,
+			Some(chan) => Some(callback(chan)),
+		}
+	}
+
+	fn can_forward_htlc(
+		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
+	) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+		match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
+		}) {
+			Some(Ok(())) => {},
+			Some(Err(e)) => return Err(e),
+			None => {
+				// If we couldn't find the channel info for the scid, it may be a phantom or
+				// intercept forward.
+				if (self.default_configuration.accept_intercept_htlcs &&
+					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
+					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
+				{} else {
+					return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+				}
+			}
+		}

+		let cur_height = self.best_block.read().unwrap().height + 1;
+		if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
+			cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
+		) {
+			let chan_update_opt = self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+				self.get_channel_update_for_onion(next_packet_details.outgoing_scid, chan).ok()
+			}).flatten();
+			return Err((err_msg, err_code, chan_update_opt));
+		}
+
+		Ok(())
+	}
+
+	fn htlc_failure_from_update_add_err(
+		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
+		mut err_code: u16, chan_update: Option<msgs::ChannelUpdate>, is_intro_node_blinded_forward: bool,
+		shared_secret: &[u8; 32]
+	) -> HTLCFailureMsg {
+		let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+		if chan_update.is_some() && err_code & 0x1000 == 0x1000 {
+			let chan_update = chan_update.unwrap();
+			if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
+				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+			}
+			else if err_code == 0x1000 | 13 {
+				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+			}
+			else if err_code == 0x1000 | 20 {
+				// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+				0u16.write(&mut res).expect("Writes cannot fail");
+			}
+			(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+			msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+			chan_update.write(&mut res).expect("Writes cannot fail");
+		} else if err_code & 0x1000 == 0x1000 {
+			// If we're trying to return an error that requires a `channel_update` but
+			// we're forwarding to a phantom or intercept "channel" (i.e. cannot
+			// generate an update), just use the generic "temporary_node_failure"
+			// instead.
+			err_code = 0x2000 | 2;
+		}
+
+		log_info!(
+			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
+			"Failed to accept/forward incoming HTLC: {}", err_msg
+		);
+		// If `msg.blinding_point` is set, we must always fail with malformed.
+		if msg.blinding_point.is_some() {
+			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+				channel_id: msg.channel_id,
+				htlc_id: msg.htlc_id,
+				sha256_of_onion: [0; 32],
+				failure_code: INVALID_ONION_BLINDING,
+			});
+		}
+
+		let (err_code, err_data) = if is_intro_node_blinded_forward {
+			(INVALID_ONION_BLINDING, &[0; 32][..])
+		} else {
+			(err_code, &res.0[..])
+		};
+		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+			channel_id: msg.channel_id,
+			htlc_id: msg.htlc_id,
+			reason: HTLCFailReason::reason(err_code, err_data.to_vec())
+				.get_encrypted_failure_packet(shared_secret, &None),
+		})
+	}
+
	fn decode_update_add_htlc_onion(
		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
	) -> Result<
@@ -3732,48 +3924,7 @@ where
		(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
	> {
		let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(
			msg, &self.node_signer, &self.logger, &self.secp_ctx
		)?;

-		let is_intro_node_forward = match next_hop {
-			onion_utils::Hop::Forward {
-				next_hop_data: msgs::InboundOnionPayload::BlindedForward {
-					intro_node_blinding_point: Some(_), ..
- } => true, - _ => false, - }; - - macro_rules! return_err { - ($msg: expr, $err_code: expr, $data: expr) => { - { - log_info!( - WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)), - "Failed to accept/forward incoming HTLC: {}", $msg - ); - // If `msg.blinding_point` is set, we must always fail with malformed. - if msg.blinding_point.is_some() { - return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, - })); - } - - let (err_code, err_data) = if is_intro_node_forward { - (INVALID_ONION_BLINDING, &[0; 32][..]) - } else { ($err_code, $data) }; - return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason: HTLCFailReason::reason(err_code, err_data.to_vec()) - .get_encrypted_failure_packet(&shared_secret, &None), - })); - } - } - } - - let NextPacketDetails { - next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value - } = match next_packet_details_opt { + let next_packet_details = match next_packet_details_opt { Some(next_packet_details) => next_packet_details, // it is a receive, so no need for outbound checks None => return Ok((next_hop, shared_secret, None)), @@ -3781,124 +3932,15 @@ where // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we // can't hold the outbound peer state lock at the same time as the inbound peer state lock. - if let Some((err, mut code, chan_update)) = loop { - let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned(); - let forwarding_chan_info_opt = match id_option { - None => { // unknown_next_peer - // Note that this is likely a timing oracle for detecting whether an scid is a - // phantom or an intercept. - if (self.default_configuration.accept_intercept_htlcs && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || - fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash) - { - None - } else { - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - } - }, - Some((cp_id, id)) => Some((cp_id.clone(), id.clone())), - }; - let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); - if peer_state_mutex_opt.is_none() { - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - } - let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); - let peer_state = &mut *peer_state_lock; - let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map( - |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None } - ).flatten() { - None => { - // Channel was removed. The short_to_chan_info and channel_by_id maps - // have no consistency guarantees. - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - }, - Some(chan) => chan - }; - if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { - // Note that the behavior here should be identical to the above block - we - // should NOT reveal the existence or non-existence of a private channel if - // we don't allow forwards outbound over them. 
- break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None)); - } - if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { - // `option_scid_alias` (referred to in LDK as `scid_privacy`) means - // "refuse to forward unless the SCID alias was used", so we pretend - // we don't have the channel here. - break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None)); - } - let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok(); - - // Note that we could technically not return an error yet here and just hope - // that the connection is reestablished or monitor updated by the time we get - // around to doing the actual forward, but better to fail early if we can and - // hopefully an attacker trying to path-trace payments cannot make this occur - // on a small/per-node/per-channel scale. - if !chan.context.is_live() { // channel_disabled - // If the channel_update we're going to return is disabled (i.e. the - // peer has been disabled for some time), return `channel_disabled`, - // otherwise return `temporary_channel_failure`. - if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) { - break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt)); - } else { - break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt)); - } - } - if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum - break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt)); - } - if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) { - break Some((err, code, chan_update_opt)); - } - chan_update_opt - } else { - None - }; - - let cur_height = self.best_block.read().unwrap().height + 1; - - if let Err((err_msg, code)) = check_incoming_htlc_cltv( - cur_height, outgoing_cltv_value, msg.cltv_expiry - ) { - if code & 0x1000 != 0 && chan_update_opt.is_none() { - // We really should set `incorrect_cltv_expiry` here but as we're not - // forwarding over a real channel we can't generate a channel_update - // for it. Instead we just return a generic temporary_node_failure. 
- break Some((err_msg, 0x2000 | 2, None)) - } - let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None }; - break Some((err_msg, code, chan_update_opt)); - } + self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| { + let (err_msg, err_code, chan_update_opt) = e; + self.htlc_failure_from_update_add_err( + msg, counterparty_node_id, err_msg, err_code, chan_update_opt, + next_hop.is_intro_node_blinded_forward(), &shared_secret + ) + })?; - break None; - } - { - let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2)); - if let Some(chan_update) = chan_update { - if code == 0x1000 | 11 || code == 0x1000 | 12 { - msg.amount_msat.write(&mut res).expect("Writes cannot fail"); - } - else if code == 0x1000 | 13 { - msg.cltv_expiry.write(&mut res).expect("Writes cannot fail"); - } - else if code == 0x1000 | 20 { - // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791 - 0u16.write(&mut res).expect("Writes cannot fail"); - } - (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail"); - msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail"); - chan_update.write(&mut res).expect("Writes cannot fail"); - } else if code & 0x1000 == 0x1000 { - // If we're trying to return an error that requires a `channel_update` but - // we're forwarding to a phantom or intercept "channel" (i.e. cannot - // generate an update), just use the generic "temporary_node_failure" - // instead. - code = 0x2000 | 2; - } - return_err!(err, code, &res.0[..]); - } - Ok((next_hop, shared_secret, Some(next_packet_pubkey))) + Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey))) } fn construct_pending_htlc_status<'a>( @@ -4735,6 +4777,7 @@ where .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + for channel_id in channel_ids { if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { @@ -4751,7 +4794,8 @@ where } if let ChannelPhase::Funded(channel) = channel_phase { if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: channel.context.get_counterparty_node_id(), @@ -4926,6 +4970,145 @@ where Ok(()) } + fn process_pending_update_add_htlcs(&self) { + let mut decode_update_add_htlcs = new_hash_map(); + mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); + + let get_failed_htlc_destination = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { + if let Some(outgoing_scid) = outgoing_scid_opt { + match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) { + Some((outgoing_counterparty_node_id, outgoing_channel_id)) => + HTLCDestination::NextHopChannel { + node_id: Some(*outgoing_counterparty_node_id), + channel_id: *outgoing_channel_id, + }, + None => HTLCDestination::UnknownNextHop { + requested_forward_scid: outgoing_scid, + }, + } + } else { + 
+				HTLCDestination::FailedPayment { payment_hash }
+			}
+		};
+
+		'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
+			let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+				let counterparty_node_id = chan.context.get_counterparty_node_id();
+				let channel_id = chan.context.channel_id();
+				let funding_txo = chan.context.get_funding_txo().unwrap();
+				let user_channel_id = chan.context.get_user_id();
+				let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
+				(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
+			});
+			let (
+				incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
+				incoming_user_channel_id, incoming_accept_underpaying_htlcs
+			) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
+				incoming_channel_details
+			} else {
+				// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
+				continue;
+			};
+
+			let mut htlc_forwards = Vec::new();
+			let mut htlc_fails = Vec::new();
+			for update_add_htlc in &update_add_htlcs {
+				let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
+					&update_add_htlc, &self.node_signer, &self.logger, &self.secp_ctx
+				) {
+					Ok(decoded_onion) => decoded_onion,
+					Err(htlc_fail) => {
+						htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
+						continue;
+					},
+				};
+
+				let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
+				let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
+
+				// Process the HTLC on the incoming channel.
+				match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+					let logger = WithChannelContext::from(&self.logger, &chan.context);
+					chan.can_accept_incoming_htlc(
+						update_add_htlc, &self.fee_estimator, &logger,
+					)
+				}) {
+					Some(Ok(_)) => {},
+					Some(Err((err, code))) => {
+						let outgoing_chan_update_opt = if let Some(outgoing_scid) = outgoing_scid_opt.as_ref() {
+							self.do_funded_channel_callback(*outgoing_scid, |chan: &mut Channel<SP>| {
+								self.get_channel_update_for_onion(*outgoing_scid, chan).ok()
+							}).flatten()
+						} else {
+							None
+						};
+						let htlc_fail = self.htlc_failure_from_update_add_err(
+							&update_add_htlc, &incoming_counterparty_node_id, err, code,
+							outgoing_chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+						);
+						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+						htlc_fails.push((htlc_fail, htlc_destination));
+						continue;
+					},
+					// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
+					None => continue 'outer_loop,
+				}
+
+				// Now process the HTLC on the outgoing channel if it's a forward.
+ if let Some(next_packet_details) = next_packet_details_opt.as_ref() { + if let Err((err, code, chan_update_opt)) = self.can_forward_htlc( + &update_add_htlc, next_packet_details + ) { + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, &incoming_counterparty_node_id, err, code, + chan_update_opt, is_intro_node_blinded_forward, &shared_secret, + ); + let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, htlc_destination)); + continue; + } + } + + match self.construct_pending_htlc_status( + &update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop, + incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey), + ) { + PendingHTLCStatus::Forward(htlc_forward) => { + htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id)); + }, + PendingHTLCStatus::Fail(htlc_fail) => { + let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, htlc_destination)); + }, + } + } + + // Process all of the forwards and failures for the channel in which the HTLCs were + // proposed to as a batch. + let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id, + incoming_user_channel_id, htlc_forwards.drain(..).collect()); + self.forward_htlcs_without_forward_event(&mut [pending_forwards]); + for (htlc_fail, htlc_destination) in htlc_fails.drain(..) { + let failure = match htlc_fail { + HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC { + htlc_id: fail_htlc.htlc_id, + err_packet: fail_htlc.reason, + }, + HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC { + htlc_id: fail_malformed_htlc.htlc_id, + sha256_of_onion: fail_malformed_htlc.sha256_of_onion, + failure_code: fail_malformed_htlc.failure_code, + }, + }; + self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_insert(vec![]).push(failure); + self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed { + prev_channel_id: incoming_channel_id, + failed_next_destination: htlc_destination, + }, None)); + } + } + } + /// Processes HTLCs which are pending waiting on random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. 
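
The new `process_pending_update_add_htlcs` above drains `decode_update_add_htlcs` at the top of `process_pending_htlc_forwards` (wired in by the next hunk), which users call in response to `Event::PendingHTLCsForwardable`. As a minimal sketch of that consumer-side driver loop, assuming an `Arc`-wrapped `channel_manager` with its generic parameters elided, in the style of the doc examples earlier in this file:

    channel_manager.process_pending_events(&|event| match event {
        Event::PendingHTLCsForwardable { time_forwardable } => {
            let forwarding_cm = Arc::clone(&channel_manager);
            std::thread::spawn(move || {
                // Waiting at least `time_forwardable` keeps an observer from tying
                // an inbound HTLC to its outbound forward by timing alone.
                std::thread::sleep(time_forwardable);
                // With this patch, the call below also drains the queued
                // `update_add_htlc`s via `process_pending_update_add_htlcs`.
                forwarding_cm.process_pending_htlc_forwards();
            });
        },
        _ => {},
    });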
@@ -4933,6 +5116,8 @@ where pub fn process_pending_htlc_forwards(&self) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + self.process_pending_update_add_htlcs(); + let mut new_events = VecDeque::new(); let mut failed_forwards = Vec::new(); let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); @@ -5164,13 +5349,14 @@ where let blinded_failure = routing.blinded_failure(); let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing { PendingHTLCRouting::Receive { - payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, - custom_tlvs, requires_blinded_error: _ + payment_data, payment_metadata, payment_context, + incoming_cltv_expiry, phantom_shared_secret, custom_tlvs, + requires_blinded_error: _ } => { let _legacy_hop_data = Some(payment_data.clone()); let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), payment_metadata, custom_tlvs }; - (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, + (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data, payment_context }, Some(payment_data), phantom_shared_secret, onion_fields) }, PendingHTLCRouting::ReceiveKeysend { @@ -5248,10 +5434,7 @@ where macro_rules! check_total_value { ($purpose: expr) => {{ let mut payment_claimable_generated = false; - let is_keysend = match $purpose { - events::PaymentPurpose::SpontaneousPayment(_) => true, - events::PaymentPurpose::InvoicePayment { .. } => false, - }; + let is_keysend = $purpose.is_keysend(); let mut claimable_payments = self.claimable_payments.lock().unwrap(); if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { fail_htlc!(claimable_htlc, payment_hash); @@ -5348,7 +5531,7 @@ where match payment_secrets.entry(payment_hash) { hash_map::Entry::Vacant(_) => { match claimable_htlc.onion_payload { - OnionPayload::Invoice { .. } => { + OnionPayload::Invoice { ref payment_context, .. } => { let payment_data = payment_data.unwrap(); let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { Ok(result) => result, @@ -5365,10 +5548,11 @@ where fail_htlc!(claimable_htlc, payment_hash); } } - let purpose = events::PaymentPurpose::InvoicePayment { - payment_preimage: payment_preimage.clone(), - payment_secret: payment_data.payment_secret, - }; + let purpose = events::PaymentPurpose::from_parts( + payment_preimage.clone(), + payment_data.payment_secret, + payment_context.clone(), + ); check_total_value!(purpose); }, OnionPayload::Spontaneous(preimage) => { @@ -5378,10 +5562,13 @@ where } }, hash_map::Entry::Occupied(inbound_payment) => { - if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload { - log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", &payment_hash); - fail_htlc!(claimable_htlc, payment_hash); - } + let payment_context = match claimable_htlc.onion_payload { + OnionPayload::Spontaneous(_) => { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", &payment_hash); + fail_htlc!(claimable_htlc, payment_hash); + }, + OnionPayload::Invoice { ref payment_context, .. 
} => payment_context, + }; let payment_data = payment_data.unwrap(); if inbound_payment.get().payment_secret != payment_data.payment_secret { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", &payment_hash); @@ -5391,10 +5578,11 @@ where &payment_hash, payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); fail_htlc!(claimable_htlc, payment_hash); } else { - let purpose = events::PaymentPurpose::InvoicePayment { - payment_preimage: inbound_payment.get().payment_preimage, - payment_secret: payment_data.payment_secret, - }; + let purpose = events::PaymentPurpose::from_parts( + inbound_payment.get().payment_preimage, + payment_data.payment_secret, + payment_context.clone(), + ); let payment_claimable_generated = check_total_value!(purpose); if payment_claimable_generated { inbound_payment.remove_entry(); @@ -5660,7 +5848,8 @@ where if n >= DISABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Disabled); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -5674,7 +5863,8 @@ where if n >= ENABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Enabled); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -5713,12 +5903,12 @@ where process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events, counterparty_node_id) }, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(chan) => { process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events, counterparty_node_id) }, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(chan) => { process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events, counterparty_node_id) @@ -5977,9 +6167,14 @@ where } } + fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { + let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination); + if push_forward_event { self.push_pending_forwards_ev(); } + } + /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. - fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { + fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool { // Ensure that no peer state channel storage lock is held when calling this function. 
		// This ensures that future code doesn't introduce a lock-order requirement for
		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
@@ -5997,12 +6192,12 @@ where
		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
		// from block_connected which may run during initialization prior to the chain_monitor
		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
+		let mut push_forward_event;
		match source {
			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
-				if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
+				push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
					session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
-					&self.pending_events, &self.logger)
-				{ self.push_pending_forwards_ev(); }
+					&self.pending_events, &self.logger);
			},
			HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
@@ -6036,11 +6231,9 @@ where
				}
			};
-				let mut push_forward_ev = false;
+				push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-				if forward_htlcs.is_empty() {
-					push_forward_ev = true;
-				}
+				push_forward_event &= forward_htlcs.is_empty();
				match forward_htlcs.entry(*short_channel_id) {
					hash_map::Entry::Occupied(mut entry) => {
						entry.get_mut().push(failure);
@@ -6050,7 +6243,6 @@ where
					}
				}
				mem::drop(forward_htlcs);
-				if push_forward_ev { self.push_pending_forwards_ev(); }
				let mut pending_events = self.pending_events.lock().unwrap();
				pending_events.push_back((events::Event::HTLCHandlingFailed {
					prev_channel_id: *channel_id,
@@ -6058,6 +6250,7 @@ where
				}, None));
			},
		}
+		push_forward_event
	}

	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
@@ -6576,24 +6769,31 @@ where
	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
		channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
-		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+		pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
+		funding_broadcastable: Option<Transaction>,
		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-	-> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
+	-> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
		let logger = WithChannelContext::from(&self.logger, &channel.context);
-		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
			&channel.context.channel_id(), if raa.is_some() { "an" } else { "no" },
-			if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+			if commitment_update.is_some() { "a" } else { "no" },
+			pending_forwards.len(), pending_update_adds.len(),
			if funding_broadcastable.is_some() { "" } else { "not " },
			if channel_ready.is_some() { "sending" } else { "without" },
			if announcement_sigs.is_some() { "sending" } else { "without" });

-		let mut htlc_forwards = None;
		let counterparty_node_id =
channel.context.get_counterparty_node_id(); + let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()); + + let mut htlc_forwards = None; if !pending_forwards.is_empty() { - htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()), - channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards)); + htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(), + channel.context.channel_id(), channel.context.get_user_id(), pending_forwards)); + } + let mut decode_update_add_htlcs = None; + if !pending_update_adds.is_empty() { + decode_update_add_htlcs = Some((short_channel_id, pending_update_adds)); } if let Some(msg) = channel_ready { @@ -6644,7 +6844,7 @@ where emit_channel_ready_event!(pending_events, channel); } - htlc_forwards + (htlc_forwards, decode_update_add_htlcs) } fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { @@ -6880,8 +7080,8 @@ where num_unfunded_channels += 1; } }, - // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed. - #[cfg(dual_funding)] + // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed. + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(chan) => { // Only inbound V2 channels that are not 0conf and that we do not contribute to will be // included in the unfunded count. @@ -6894,8 +7094,8 @@ where // Outbound channels don't contribute to the unfunded count in the DoS context. continue; }, - // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed. - #[cfg(dual_funding)] + // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed. + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(_) => { // Outbound channels don't contribute to the unfunded count in the DoS context. continue; @@ -7322,7 +7522,7 @@ where finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel)); }, // TODO(dual_funding): Combine this match arm with above. 
- #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => { let context = phase.context_mut(); log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); @@ -7390,9 +7590,8 @@ where } if let Some(ChannelPhase::Funded(chan)) = chan_option { if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -7429,7 +7628,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { - let pending_forward_info = match decoded_hop_res { + let mut pending_forward_info = match decoded_hop_res { Ok((next_hop, shared_secret, next_packet_pk_opt)) => self.construct_pending_htlc_status( msg, counterparty_node_id, shared_secret, next_hop, @@ -7437,44 +7636,45 @@ where ), Err(e) => PendingHTLCStatus::Fail(e) }; - let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { + let logger = WithChannelContext::from(&self.logger, &chan.context); + // If the update_add is completely bogus, the call will Err and we will close, + // but if we've sent a shutdown and they haven't acknowledged it yet, we just + // want to reject the new HTLC and fail it backwards instead of forwarding. + if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) { if msg.blinding_point.is_some() { - return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( - msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, - } - )) - } - // If the update_add is completely bogus, the call will Err and we will close, - // but if we've sent a shutdown and they haven't acknowledged it yet, we just - // want to reject the new HTLC and fail it backwards instead of forwarding. - match pending_forward_info { - PendingHTLCStatus::Forward(PendingHTLCInfo { - ref incoming_shared_secret, ref routing, .. - }) => { - let reason = if routing.blinded_failure().is_some() { - HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]) - } else if (error_code & 0x1000) != 0 { - let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); - HTLCFailReason::reason(real_code, error_data) - } else { - HTLCFailReason::from_failure_code(error_code) - }.get_encrypted_failure_packet(incoming_shared_secret, &None); - let msg = msgs::UpdateFailHTLC { + pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( + msgs::UpdateFailMalformedHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, - reason - }; - PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)) - }, - _ => pending_forward_info + sha256_of_onion: [0; 32], + failure_code: INVALID_ONION_BLINDING, + } + )) + } else { + match pending_forward_info { + PendingHTLCStatus::Forward(PendingHTLCInfo { + ref incoming_shared_secret, ref routing, .. 
+						}) => {
+							let reason = if routing.blinded_failure().is_some() {
+								HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+							} else if (error_code & 0x1000) != 0 {
+								let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+								HTLCFailReason::reason(real_code, error_data)
+							} else {
+								HTLCFailReason::from_failure_code(error_code)
+							}.get_encrypted_failure_packet(incoming_shared_secret, &None);
+							let msg = msgs::UpdateFailHTLC {
+								channel_id: msg.channel_id,
+								htlc_id: msg.htlc_id,
+								reason
+							};
+							pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
+						},
+						_ => {},
+					}
+				}
-					};
-					let logger = WithChannelContext::from(&self.logger, &chan.context);
-					try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
+				try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info), chan_phase_entry);
				} else {
					return try_chan_phase_entry!(self, Err(ChannelError::Close(
						"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
@@ -7618,10 +7818,28 @@ where
		}
	}

+	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
+		let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
+		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+		push_forward_event &= decode_update_add_htlcs.is_empty();
+		let scid = update_add_htlcs.0;
+		match decode_update_add_htlcs.entry(scid) {
+			hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
+			hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
+		}
+		if push_forward_event { self.push_pending_forwards_ev(); }
+	}
+
	#[inline]
	fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+		let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
+		if push_forward_event { self.push_pending_forwards_ev() }
+	}
+
+	#[inline]
+	fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
+		let mut push_forward_event = false;
		for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
-			let mut push_forward_event = false;
			let mut new_intercept_events = VecDeque::new();
			let mut failed_intercept_forwards = Vec::new();
			if !pending_forwards.is_empty() {
@@ -7634,6 +7852,7 @@ where
					// Pull this now to avoid introducing a lock order with `forward_htlcs`.
					let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);

+					let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
					let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
					let forward_htlcs_empty = forward_htlcs.is_empty();
					match forward_htlcs.entry(scid) {
@@ -7682,9 +7901,7 @@ where
						} else {
							// We don't want to generate a PendingHTLCsForwardable event if only intercepted
							// payments are being processed.
- if forward_htlcs_empty { - push_forward_event = true; - } + push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty; entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }))); } @@ -7694,15 +7911,15 @@ where } for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) { - self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); + push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination); } if !new_intercept_events.is_empty() { let mut events = self.pending_events.lock().unwrap(); events.append(&mut new_intercept_events); } - if push_forward_event { self.push_pending_forwards_ev() } } + push_forward_event } fn push_pending_forwards_ev(&self) { @@ -7912,7 +8129,6 @@ where } fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result { - let htlc_forwards; let need_lnd_workaround = { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -7955,9 +8171,11 @@ where } } let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take(); - htlc_forwards = self.handle_channel_resumption( + let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order, - Vec::new(), None, responses.channel_ready, responses.announcement_sigs); + Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs); + debug_assert!(htlc_forwards.is_none()); + debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); } @@ -8003,16 +8221,10 @@ where } }; - let mut persist = NotifyOption::SkipPersistHandleEvents; - if let Some(forwards) = htlc_forwards { - self.forward_htlcs(&mut [forwards][..]); - persist = NotifyOption::DoPersist; - } - if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } - Ok(persist) + Ok(NotifyOption::SkipPersistHandleEvents) } /// Process pending events from the [`chain::Watch`], returning whether any events were processed. @@ -8064,7 +8276,8 @@ where }; failed_channels.push(chan.context.force_shutdown(false, reason.clone())); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -8249,7 +8462,8 @@ where // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -8340,9 +8554,7 @@ macro_rules! 
create_offer_builder { ($self: ident, $builder: ty) => { /// /// [`Offer`]: crate::offers::offer::Offer /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest - pub fn create_offer_builder( - &$self, description: String - ) -> Result<$builder, Bolt12SemanticError> { + pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> { let node_id = $self.get_our_node_id(); let expanded_key = &$self.inbound_payment_key; let entropy = &*$self.entropy_source; @@ -8350,7 +8562,7 @@ macro_rules! create_offer_builder { ($self: ident, $builder: ty) => { let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; let builder = OfferBuilder::deriving_signing_pubkey( - description, node_id, expanded_key, entropy, secp_ctx + node_id, expanded_key, entropy, secp_ctx ) .chain_hash($self.chain_hash) .path(path); @@ -8409,8 +8621,8 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments pub fn create_refund_builder( - &$self, description: String, amount_msats: u64, absolute_expiry: Duration, - payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option + &$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, + retry_strategy: Retry, max_total_routing_fee_msat: Option ) -> Result<$builder, Bolt12SemanticError> { let node_id = $self.get_our_node_id(); let expanded_key = &$self.inbound_payment_key; @@ -8419,7 +8631,7 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?; let builder = RefundBuilder::deriving_payer_id( - description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id + node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id )? .chain_hash($self.chain_hash) .absolute_expiry(absolute_expiry) @@ -8552,14 +8764,7 @@ where .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?; let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); - if offer.paths().is_empty() { - let message = new_pending_onion_message( - OffersMessage::InvoiceRequest(invoice_request), - Destination::Node(offer.signing_pubkey()), - Some(reply_path), - ); - pending_offers_messages.push(message); - } else { + if !offer.paths().is_empty() { // Send as many invoice requests as there are paths in the offer (with an upper bound). // Using only one path could result in a failure if the path no longer exists. But only // one invoice for a given payment id will be paid, even if more than one is received. @@ -8572,6 +8777,16 @@ where ); pending_offers_messages.push(message); } + } else if let Some(signing_pubkey) = offer.signing_pubkey() { + let message = new_pending_onion_message( + OffersMessage::InvoiceRequest(invoice_request), + Destination::Node(signing_pubkey), + Some(reply_path), + ); + pending_offers_messages.push(message); + } else { + debug_assert!(false); + return Err(Bolt12SemanticError::MissingSigningPubkey); } Ok(()) @@ -8582,7 +8797,7 @@ where /// /// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a /// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding - /// [`PaymentPreimage`]. + /// [`PaymentPreimage`]. It is returned purely for informational purposes. /// /// # Limitations /// @@ -8599,7 +8814,9 @@ where /// the invoice. 
/// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice - pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> { + pub fn request_refund_payment( + &self, refund: &Refund + ) -> Result { let expanded_key = &self.inbound_payment_key; let entropy = &*self.entropy_source; let secp_ctx = &self.secp_ctx; @@ -8615,7 +8832,10 @@ where match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) { Ok((payment_hash, payment_secret)) => { - let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret) + let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {}); + let payment_paths = self.create_blinded_payment_paths( + amount_msats, payment_secret, payment_context + ) .map_err(|_| Bolt12SemanticError::MissingPaths)?; #[cfg(feature = "std")] @@ -8638,7 +8858,7 @@ where let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap(); if refund.paths().is_empty() { let message = new_pending_onion_message( - OffersMessage::Invoice(invoice), + OffersMessage::Invoice(invoice.clone()), Destination::Node(refund.payer_id()), Some(reply_path), ); @@ -8654,7 +8874,7 @@ where } } - Ok(()) + Ok(invoice) }, Err(()) => Err(Bolt12SemanticError::InvalidAmount), } @@ -8666,10 +8886,9 @@ where /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the /// [`PaymentHash`] and [`PaymentPreimage`] for you. /// - /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which - /// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with - /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be - /// passed directly to [`claim_funds`]. + /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event, which + /// will have the [`PaymentClaimable::purpose`] return `Some` for [`PaymentPurpose::preimage`]. That + /// should then be passed directly to [`claim_funds`]. /// /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements. /// @@ -8689,8 +8908,7 @@ where /// [`claim_funds`]: Self::claim_funds /// [`PaymentClaimable`]: events::Event::PaymentClaimable /// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose - /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment - /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage + /// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash pub fn create_inbound_payment(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32, min_final_cltv_expiry_delta: Option) -> Result<(PaymentHash, PaymentSecret), ()> { @@ -8781,7 +8999,7 @@ where /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to /// [`Router::create_blinded_payment_paths`]. 
 	fn create_blinded_payment_paths(
-		&self, amount_msats: u64, payment_secret: PaymentSecret
+		&self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
 	) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
 		let secp_ctx = &self.secp_ctx;
@@ -8795,6 +9013,7 @@ where
 				max_cltv_expiry,
 				htlc_minimum_msat: 1,
 			},
+			payment_context,
 		};
 		self.router.create_blinded_payment_paths(
 			payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
@@ -9009,7 +9228,7 @@ where
 	/// will randomly be placed first or last in the returned array.
 	///
 	/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
-	/// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among
+	/// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among
 	/// the `MessageSendEvent`s to the specific peer they were generated under.
 	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
 		let events = RefCell::new(Vec::new());
@@ -9029,6 +9248,7 @@ where
 				result = NotifyOption::DoPersist;
 			}

+			let mut is_any_peer_connected = false;
 			let mut pending_events = Vec::new();
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
@@ -9037,6 +9257,15 @@ where
 				if peer_state.pending_msg_events.len() > 0 {
 					pending_events.append(&mut peer_state.pending_msg_events);
 				}
+				if peer_state.is_connected {
+					is_any_peer_connected = true
+				}
+			}
+
+			// Ensure that we are connected to some peers before getting broadcast messages.
+			if is_any_peer_connected {
+				let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
+				pending_events.append(&mut broadcast_msgs);
 			}

 			if !pending_events.is_empty() {
@@ -9241,12 +9470,13 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
+
 			peer_state.channel_by_id.retain(|_, phase| {
 				match phase {
 					// Retain unfunded channels.
 					ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
 					// TODO(dual_funding): Combine this match arm with above.
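
Aside: the `payment_context` threaded through `create_blinded_payment_paths` above ends up TLV-encoded inside the recipient's own blinded path, so when an HTLC later arrives over that path the recipient gets its own context back (offer vs. refund) without any extra lookup and can surface the matching `PaymentPurpose` variant. A reduced sketch of the idea; the field layout below is simplified and is not the real `ReceiveTlvs` definition:

    // Simplified stand-ins: the real types carry more fields and are TLV-encoded.
    #[derive(Clone, Debug)]
    enum PaymentContext {
        Bolt12Offer { offer_id: [u8; 32] },
        Bolt12Refund,
    }

    struct ReceiveTlvs {
        payment_secret: [u8; 32],
        htlc_minimum_msat: u64,
        payment_context: PaymentContext,
    }

    // The recipient embeds the context when building its blinded payment path...
    fn receive_tlvs_for(payment_secret: [u8; 32], context: PaymentContext) -> ReceiveTlvs {
        ReceiveTlvs { payment_secret, htlc_minimum_msat: 1, payment_context: context }
    }

    // ...and recovers it, rather than guessing, when the payment is claimed.
    fn purpose_label(tlvs: &ReceiveTlvs) -> &'static str {
        match tlvs.payment_context {
            PaymentContext::Bolt12Offer { .. } => "Bolt12OfferPayment",
            PaymentContext::Bolt12Refund => "Bolt12RefundPayment",
        }
    }
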
- #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true, ChannelPhase::Funded(channel) => { let res = f(channel); @@ -9316,7 +9546,8 @@ where let reason_message = format!("{}", reason); failed_channels.push(channel.context.force_shutdown(true, reason)); if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -9551,18 +9782,21 @@ where msg.channel_id.clone())), *counterparty_node_id); } + #[cfg(splicing)] fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported".to_owned(), msg.channel_id.clone())), *counterparty_node_id); } + #[cfg(splicing)] fn handle_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported (splice_ack)".to_owned(), msg.channel_id.clone())), *counterparty_node_id); } + #[cfg(splicing)] fn handle_splice_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported (splice_locked)".to_owned(), @@ -9720,11 +9954,11 @@ where ChannelPhase::UnfundedInboundV1(chan) => { &mut chan.context }, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(chan) => { &mut chan.context }, - #[cfg(dual_funding)] + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedInboundV2(chan) => { &mut chan.context }, @@ -9776,7 +10010,12 @@ where // Gossip &events::MessageSendEvent::SendChannelAnnouncement { .. } => false, &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, + // [`ChannelManager::pending_broadcast_events`] holds the [`BroadcastChannelUpdate`] + // This check here is to ensure exhaustivity. + &events::MessageSendEvent::BroadcastChannelUpdate { .. } => { + debug_assert!(false, "This event shouldn't have been here"); + false + }, &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::SendChannelUpdate { .. } => false, &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, @@ -9880,8 +10119,8 @@ where }); } - // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed. - #[cfg(dual_funding)] + // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed. + #[cfg(any(dual_funding, splicing))] ChannelPhase::UnfundedOutboundV2(chan) => { pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 { node_id: chan.context.get_counterparty_node_id(), @@ -9896,8 +10135,8 @@ where debug_assert!(false); } - // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed. - #[cfg(dual_funding)] + // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed. 
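
Aside: the `pending_broadcast_messages` queue used in the hunks above addresses a real loss case: a `BroadcastChannelUpdate` pushed into a per-peer event list while no peers are connected could be cleared without ever reaching the network. Staging broadcasts node-wide and draining them only once some peer is connected (the `is_any_peer_connected` check earlier) avoids that. A minimal sketch of the gating, with a hypothetical `BroadcastEvent` type standing in for `MessageSendEvent`:

    use std::sync::Mutex;

    #[derive(Debug)]
    enum BroadcastEvent { ChannelUpdate { scid: u64 } }

    struct Broadcaster {
        pending_broadcast_messages: Mutex<Vec<BroadcastEvent>>,
    }

    impl Broadcaster {
        // Stage a broadcast regardless of connectivity; nothing is dropped.
        fn stage(&self, ev: BroadcastEvent) {
            self.pending_broadcast_messages.lock().unwrap().push(ev);
        }

        // Drain staged broadcasts into the outgoing event list only when at
        // least one peer is connected to actually receive them.
        fn collect(&self, any_peer_connected: bool, out: &mut Vec<BroadcastEvent>) {
            if any_peer_connected {
                out.append(&mut self.pending_broadcast_messages.lock().unwrap());
            }
        }
    }
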
+				#[cfg(any(dual_funding, splicing))]
 				ChannelPhase::UnfundedInboundV2(channel) => {
 					// Since unfunded inbound channel maps are cleared upon disconnecting a peer,
 					// they are not persisted and won't be recovered after a crash.
@@ -10000,7 +10239,7 @@ where
 						return;
 					}
 				},
-				#[cfg(dual_funding)]
+				#[cfg(any(dual_funding, splicing))]
 				Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
 					if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
 						peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
 							node_id: chan.context.get_counterparty_node_id(),
 							msg,
 						});
 						return;
 					}
 				},
 				None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
-				#[cfg(dual_funding)]
+				#[cfg(any(dual_funding, splicing))]
 				Some(ChannelPhase::UnfundedInboundV2(_)) => (),
 			}
 		}
@@ -10131,8 +10370,12 @@ where
 				},
 			};
+			let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+				offer_id: invoice_request.offer_id,
+				invoice_request: invoice_request.fields(),
+			});
 			let payment_paths = match self.create_blinded_payment_paths(
-				amount_msats, payment_secret
+				amount_msats, payment_secret, payment_context
 			) {
 				Ok(payment_paths) => payment_paths,
 				Err(()) => {
@@ -10146,7 +10389,7 @@ where
 				self.highest_seen_timestamp.load(Ordering::Acquire) as u64
 			);

-			if invoice_request.keys.is_some() {
+			let response = if invoice_request.keys.is_some() {
 				#[cfg(feature = "std")]
 				let builder = invoice_request.respond_using_derived_keys(
 					payment_paths, payment_hash
 				);
 				#[cfg(not(feature = "std"))]
 				let builder = invoice_request.respond_using_derived_keys_no_std(
 					payment_paths, payment_hash, created_at
 				);
-				let builder: Result<InvoiceBuilder<DerivedSigningPubkey>, _> =
-					builder.map(|b| b.into());
-				match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
-					Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
-					Err(error) => Some(OffersMessage::InvoiceError(error.into())),
-				}
+				builder
+					.map(InvoiceBuilder::<DerivedSigningPubkey>::from)
+					.and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
+					.map_err(InvoiceError::from)
 			} else {
 				#[cfg(feature = "std")]
 				let builder = invoice_request.respond_with(payment_paths, payment_hash);
 				#[cfg(not(feature = "std"))]
 				let builder = invoice_request.respond_with_no_std(
 					payment_paths, payment_hash, created_at
 				);
-				let builder: Result<InvoiceBuilder<ExplicitSigningPubkey>, _> =
-					builder.map(|b| b.into());
-				let response = builder.and_then(|builder| builder.allow_mpp().build())
-					.map_err(|e| OffersMessage::InvoiceError(e.into()))
+				builder
+					.map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
+					.and_then(|builder| builder.allow_mpp().build())
+					.map_err(InvoiceError::from)
 					.and_then(|invoice| {
 						#[cfg(c_bindings)]
 						let mut invoice = invoice;
-						match invoice.sign(|invoice: &UnsignedBolt12Invoice|
-							self.node_signer.sign_bolt12_invoice(invoice)
-						) {
-							Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
-							Err(SignError::Signing) => Err(OffersMessage::InvoiceError(
-								InvoiceError::from_string("Failed signing invoice".to_string())
-							)),
-							Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
-								InvoiceError::from_string("Failed invoice signature verification".to_string())
-							)),
-						}
-					});
-				match response {
-					Ok(invoice) => Some(invoice),
-					Err(error) => Some(error),
-				}
+						invoice
+							.sign(|invoice: &UnsignedBolt12Invoice|
+								self.node_signer.sign_bolt12_invoice(invoice)
+							)
+							.map_err(InvoiceError::from)
+					})
+			};
+
+			match response {
+				Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
+				Err(error) => Some(OffersMessage::InvoiceError(error.into())),
+			}
 		},
 		OffersMessage::Invoice(invoice) => {
-			match invoice.verify(expanded_key, secp_ctx) {
-				Err(()) => {
-					Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned())))
-				},
-				Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
-					Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
-				},
-				Ok(payment_id) => {
-					if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
-						log_trace!(self.logger, "Failed paying invoice: {:?}", e);
-						Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e))))
+			let response = invoice
+				.verify(expanded_key, secp_ctx)
+				.map_err(|()| InvoiceError::from_string("Unrecognized invoice".to_owned()))
+				.and_then(|payment_id| {
+					let features = self.bolt12_invoice_features();
+					if invoice.invoice_features().requires_unknown_bits_from(&features) {
+						Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
 					} else {
-						None
+						self.send_payment_for_bolt12_invoice(&invoice, payment_id)
+							.map_err(|e| {
+								log_trace!(self.logger, "Failed paying invoice: {:?}", e);
+								InvoiceError::from_string(format!("{:?}", e))
+							})
 					}
-				},
+				});
+
+			match response {
+				Ok(()) => None,
+				Err(e) => Some(OffersMessage::InvoiceError(e)),
 			}
 		},
 		OffersMessage::InvoiceError(invoice_error) => {
@@ -10223,6 +10463,23 @@ where
 	}
 }

+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+	T::Target: BroadcasterInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
+	F::Target: FeeEstimator,
+	R::Target: Router,
+	L::Target: Logger,
+{
+	fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
+		self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
+	}
+}
+
 /// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
 /// [`ChannelManager`].
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures { @@ -10445,6 +10702,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (3, payment_metadata, option), (5, custom_tlvs, optional_vec), (7, requires_blinded_error, (default_value, false)), + (9, payment_context, option), }, (2, ReceiveKeysend) => { (0, payment_preimage, required), @@ -10559,9 +10817,11 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { impl Writeable for ClaimableHTLC { fn write(&self, writer: &mut W) -> Result<(), io::Error> { - let (payment_data, keysend_preimage) = match &self.onion_payload { - OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None), - OnionPayload::Spontaneous(preimage) => (None, Some(preimage)), + let (payment_data, keysend_preimage, payment_context) = match &self.onion_payload { + OnionPayload::Invoice { _legacy_hop_data, payment_context } => { + (_legacy_hop_data.as_ref(), None, payment_context.as_ref()) + }, + OnionPayload::Spontaneous(preimage) => (None, Some(preimage), None), }; write_tlv_fields!(writer, { (0, self.prev_hop, required), @@ -10573,6 +10833,7 @@ impl Writeable for ClaimableHTLC { (6, self.cltv_expiry, required), (8, keysend_preimage, option), (10, self.counterparty_skimmed_fee_msat, option), + (11, payment_context, option), }); Ok(()) } @@ -10590,6 +10851,7 @@ impl Readable for ClaimableHTLC { (6, cltv_expiry, required), (8, keysend_preimage, option), (10, counterparty_skimmed_fee_msat, option), + (11, payment_context, option), }); let payment_data: Option = payment_data_opt; let value = value_ser.0.unwrap(); @@ -10610,7 +10872,7 @@ impl Readable for ClaimableHTLC { } total_msat = Some(payment_data.as_ref().unwrap().total_msat); } - OnionPayload::Invoice { _legacy_hop_data: payment_data } + OnionPayload::Invoice { _legacy_hop_data: payment_data, payment_context } }, }; Ok(Self { @@ -10805,9 +11067,10 @@ where best_block.block_hash.write(writer)?; } + let per_peer_state = self.per_peer_state.write().unwrap(); + let mut serializable_peer_count: u64 = 0; { - let per_peer_state = self.per_peer_state.read().unwrap(); let mut number_of_funded_channels = 0; for (_, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -10848,7 +11111,11 @@ where } } - let per_peer_state = self.per_peer_state.write().unwrap(); + let mut decode_update_add_htlcs_opt = None; + let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); + if !decode_update_add_htlcs.is_empty() { + decode_update_add_htlcs_opt = Some(decode_update_add_htlcs); + } let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); let claimable_payments = self.claimable_payments.lock().unwrap(); @@ -10999,6 +11266,7 @@ where (10, in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), + (14, decode_update_add_htlcs_opt, option), }); Ok(()) @@ -11464,6 +11732,7 @@ where let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); let mut events_override = None; let mut in_flight_monitor_updates: Option>> = None; + let mut decode_update_add_htlcs: Option>> = None; read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), (2, pending_intercepted_htlcs, option), @@ -11477,7 +11746,9 @@ where (10, in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), + (14, decode_update_add_htlcs, option), }); + let mut decode_update_add_htlcs = 
decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } @@ -11577,7 +11848,7 @@ where } } if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id { - // If the channel is ahead of the monitor, return InvalidValue: + // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id); @@ -11586,7 +11857,7 @@ where log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - return Err(DecodeError::InvalidValue); + return Err(DecodeError::DangerousValue); } } else { // We shouldn't have persisted (or read) any unfunded channel types so none should have been @@ -11697,6 +11968,18 @@ where // still have an entry for this HTLC in `forward_htlcs` or // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. + decode_update_add_htlcs.retain(|scid, update_add_htlcs| { + update_add_htlcs.retain(|update_add_htlc| { + let matches = *scid == prev_hop_data.short_channel_id && + update_add_htlc.htlc_id == prev_hop_data.htlc_id; + if matches { + log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + } + !matches + }); + !update_add_htlcs.is_empty() + }); forward_htlcs.retain(|_, forwards| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { @@ -11778,7 +12061,7 @@ where } } - if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() { + if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() { // If we have pending HTLCs to forward, assume we either dropped a // `PendingHTLCsForwardable` or the user received it but never processed it as they // shut down before the timer hit. 
Either way, set the time_forwardable to a small @@ -11825,9 +12108,9 @@ where return Err(DecodeError::InvalidValue); } let purpose = match &htlcs[0].onion_payload { - OnionPayload::Invoice { _legacy_hop_data } => { + OnionPayload::Invoice { _legacy_hop_data, payment_context: _ } => { if let Some(hop_data) = _legacy_hop_data { - events::PaymentPurpose::InvoicePayment { + events::PaymentPurpose::Bolt11InvoicePayment { payment_preimage: match pending_inbound_payments.get(&payment_hash) { Some(inbound_payment) => inbound_payment.payment_preimage, None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) { @@ -12012,6 +12295,7 @@ where pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), forward_htlcs: Mutex::new(forward_htlcs), + decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), outpoint_to_peer: Mutex::new(outpoint_to_peer), @@ -12040,6 +12324,8 @@ where pending_offers_messages: Mutex::new(Vec::new()), + pending_broadcast_messages: Mutex::new(Vec::new()), + entropy_source: args.entropy_source, node_signer: args.node_signer, signer_provider: args.signer_provider, @@ -12571,6 +12857,61 @@ mod tests { } } + #[test] + fn test_channel_update_cached() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap(); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + + // Confirm that the channel_update was not sent immediately to node[1] but was cached. 
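+		// (The update is staged in `ChannelManager::pending_broadcast_messages`, which
+		// `get_and_clear_pending_msg_events` only drains once at least one peer is
+		// connected; the remainder of this test exercises exactly that gating.)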
+		let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_1_events.len(), 0);
+
+		{
+			// Assert that the ChannelUpdate message has been added to nodes[0]'s pending broadcast messages
+			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+			assert_eq!(pending_broadcast_messages.len(), 1);
+		}
+
+		// Test that we do not retrieve the pending broadcast messages when no peers are connected
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+		nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+		nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_0_events.len(), 0);
+
+		// Now we reconnect to a peer
+		nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+			features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
+
+		// Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
+		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_0_events.len(), 1);
+		match &node_0_events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => (),
+			_ => panic!("Unexpected event"),
+		}
+		{
+			// Assert that the ChannelUpdate message has been cleared from nodes[0]'s pending broadcast messages
+			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+			assert_eq!(pending_broadcast_messages.len(), 0);
+		}
+	}
+
 	#[test]
 	fn test_drop_disconnected_peers_when_removing_channels() {
 		let chanmon_cfgs = create_chanmon_cfgs(2);