X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=961f25cc664ad3425d39dcf5e099b66c44eb4949;hb=131560e08fa4f66b8ce9302cde637f87602c86b0;hp=07bb18ff37c346df4d4fdfe14e5d929556feb72e;hpb=df237ba3b455f0ef246604125b8933a7f0074fc5;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 07bb18ff..961f25cc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,10 +40,10 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel}; +use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] -use crate::ln::features::InvoiceFeatures; +use crate::ln::features::Bolt11InvoiceFeatures; use crate::routing::gossip::NetworkGraph; use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router}; use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; @@ -53,7 +53,7 @@ use crate::ln::onion_utils::HTLCFailReason; use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError}; #[cfg(test)] use crate::ln::outbound_payment; -use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment}; +use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs}; use crate::ln::wire::Encode; use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner}; use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate}; @@ -110,6 +110,8 @@ pub(super) enum PendingHTLCRouting { payment_metadata: Option>, incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed phantom_shared_secret: Option<[u8; 32]>, + /// See [`RecipientOnionFields::custom_tlvs`] for more info. + custom_tlvs: Vec<(u64, Vec)>, }, ReceiveKeysend { /// This was added in 0.0.116 and will break deserialization on downgrades. @@ -117,6 +119,8 @@ pub(super) enum PendingHTLCRouting { payment_preimage: PaymentPreimage, payment_metadata: Option>, incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed + /// See [`RecipientOnionFields::custom_tlvs`] for more info. + custom_tlvs: Vec<(u64, Vec)>, }, } @@ -341,7 +345,7 @@ impl HTLCSource { } } -struct ReceiveError { +struct InboundOnionErr { err_code: u16, err_data: Vec, msg: &'static str, @@ -355,15 +359,32 @@ struct ReceiveError { pub enum FailureCode { /// We had a temporary error processing the payment. Useful if no other error codes fit /// and you want to indicate that the payer may want to retry. - TemporaryNodeFailure = 0x2000 | 2, + TemporaryNodeFailure, /// We have a required feature which was not in this onion. 
For example, you may require /// some additional metadata that was not provided with this payment. - RequiredNodeFeatureMissing = 0x4000 | 0x2000 | 3, + RequiredNodeFeatureMissing, /// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of /// the HTLC is too close to the current block height for safe handling. /// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is /// equivalent to calling [`ChannelManager::fail_htlc_backwards`]. - IncorrectOrUnknownPaymentDetails = 0x4000 | 15, + IncorrectOrUnknownPaymentDetails, + /// We failed to process the payload after the onion was decrypted. You may wish to + /// use this when receiving custom HTLC TLVs with even type numbers that you don't recognize. + /// + /// If available, the tuple data may include the type number and byte offset in the + /// decrypted byte stream where the failure occurred. + InvalidOnionPayload(Option<(u64, u16)>), +} + +impl Into for FailureCode { + fn into(self) -> u16 { + match self { + FailureCode::TemporaryNodeFailure => 0x2000 | 2, + FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3, + FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15, + FailureCode::InvalidOnionPayload(_) => 0x4000 | 22, + } + } } /// Error type returned across the peer_state mutex boundary. When an Err is generated for a @@ -376,6 +397,7 @@ struct MsgHandleErrInternal { err: msgs::LightningError, chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, + channel_capacity: Option, } impl MsgHandleErrInternal { #[inline] @@ -392,14 +414,15 @@ impl MsgHandleErrInternal { }, chan_id: None, shutdown_finish: None, + channel_capacity: None, } } #[inline] fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, chan_id: None, shutdown_finish: None } + Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None } } #[inline] - fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option) -> Self { + fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option, channel_capacity: u64) -> Self { Self { err: LightningError { err: err.clone(), @@ -412,6 +435,7 @@ impl MsgHandleErrInternal { }, chan_id: Some((channel_id, user_channel_id)), shutdown_finish: Some((shutdown_res, channel_update)), + channel_capacity: Some(channel_capacity) } } #[inline] @@ -444,6 +468,7 @@ impl MsgHandleErrInternal { }, chan_id: None, shutdown_finish: None, + channel_capacity: None, } } } @@ -685,7 +710,7 @@ impl PeerState { && self.in_flight_monitor_updates.is_empty() } - // Returns a count of all channels we have with this peer, including pending channels. + // Returns a count of all channels we have with this peer, including unfunded channels. fn total_channel_count(&self) -> usize { self.channel_by_id.len() + self.outbound_v1_channel_by_id.len() + @@ -1659,7 +1684,7 @@ macro_rules! handle_error { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => { + Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => { let mut msg_events = Vec::with_capacity(2); if let Some((shutdown_res, update_option)) = shutdown_finish { @@ -1672,7 +1697,9 @@ macro_rules! 
handle_error { if let Some((channel_id, user_channel_id)) = chan_id { $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed { channel_id, user_channel_id, - reason: ClosureReason::ProcessingError { err: err.err.clone() } + reason: ClosureReason::ProcessingError { err: err.err.clone() }, + counterparty_node_id: Some($counterparty_node_id), + channel_capacity_sats: channel_capacity, }, None)); } } @@ -1745,20 +1772,20 @@ macro_rules! convert_chan_err { update_maps_on_chan_removal!($self, &$channel.context); let shutdown_res = $channel.context.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), - shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) + shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis())) }, } }; - ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => { + ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => { match $err { - // We should only ever have `ChannelError::Close` when prefunded channels error. + // We should only ever have `ChannelError::Close` when unfunded channels error. // In any case, just close the channel. ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => { - log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, &$channel_context); let shutdown_res = $channel_context.force_shutdown(false); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(), - shutdown_res, None)) + shutdown_res, None, $channel_context.get_value_satoshis())) }, } } @@ -1784,7 +1811,7 @@ macro_rules! try_v1_outbound_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED); if drop { $entry.remove_entry(); } @@ -1937,7 +1964,7 @@ macro_rules! handle_new_monitor_update { let res = Err(MsgHandleErrInternal::from_finish_shutdown( "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), $chan.context.get_user_id(), $chan.context.force_shutdown(false), - $self.get_channel_update_for_broadcast(&$chan).ok())); + $self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis())); $remove; res }, @@ -2246,6 +2273,7 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + // Only `Channels` in the channel_by_id map can be considered funded. 
for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, peer_state.latest_features.clone(), &self.fee_estimator); @@ -2314,11 +2342,15 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let features = &peer_state.latest_features; + let chan_context_to_details = |context| { + ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator) + }; return peer_state.channel_by_id .iter() - .map(|(_, channel)| - ChannelDetails::from_channel_context(&channel.context, best_block_height, - features.clone(), &self.fee_estimator)) + .map(|(_, channel)| &channel.context) + .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .map(chan_context_to_details) .collect(); } vec![] @@ -2366,7 +2398,9 @@ where pending_events_lock.push_back((events::Event::ChannelClosed { channel_id: context.channel_id(), user_channel_id: context.get_user_id(), - reason: closure_reason + reason: closure_reason, + counterparty_node_id: Some(context.get_counterparty_node_id()), + channel_capacity_sats: Some(context.get_value_satoshis()), }, None)); } @@ -2375,48 +2409,58 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let per_peer_state = self.per_peer_state.read().unwrap(); + { + let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - let funding_txo_opt = chan_entry.get().context.get_funding_txo(); - let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() - .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; - failed_htlcs = htlcs; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; - // We can send the `shutdown` message before updating the `ChannelMonitor` - // here as we don't need the monitor update to complete until we send a - // `shutdown_signed`, which we'll delay if we're pending a monitor update. 
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg: shutdown_msg, - }); + match peer_state.channel_by_id.entry(channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); + let their_features = &peer_state.latest_features; + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() + .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; + failed_htlcs = htlcs; - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update_opt.take() { - break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); - } + // We can send the `shutdown` message before updating the `ChannelMonitor` + // here as we don't need the monitor update to complete until we send a + // `shutdown_signed`, which we'll delay if we're pending a monitor update. + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg: shutdown_msg, + }); - if chan_entry.get().is_shutdown() { - let channel = remove_channel!(self, chan_entry); - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update_opt.take() { + break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); } - self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed); - } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) }) + + if chan_entry.get().is_shutdown() { + let channel = remove_channel!(self, chan_entry); + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); + } + self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed); + } + break Ok(()); + }, + hash_map::Entry::Vacant(_) => (), + } } + // If we reach this point, it means that the channel_id either refers to an unfunded channel or + // it does not exist for this peer. Either way, we can attempt to force-close it. + // + // An appropriate error will be returned for non-existence of the channel if that's the case. + return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ()) + // TODO(dunxen): This is still not ideal as we're doing some extra lookups. + // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422 }; for htlc_source in failed_htlcs.drain(..) 
{ @@ -2535,14 +2579,14 @@ where self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); - // Prefunded channel has no update + // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) { log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); - // Prefunded channel has no update + // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else { return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); @@ -2616,14 +2660,64 @@ where } } + fn construct_fwd_pending_htlc_info( + &self, msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32], + new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32], + next_packet_pubkey_opt: Option> + ) -> Result { + debug_assert!(next_packet_pubkey_opt.is_some()); + let outgoing_packet = msgs::OnionPacket { + version: 0, + public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)), + hop_data: new_packet_bytes, + hmac: hop_hmac, + }; + + let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data { + msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } => + (short_channel_id, amt_to_forward, outgoing_cltv_value), + msgs::InboundOnionPayload::Receive { .. } => + return Err(InboundOnionErr { + msg: "Final Node OnionHopData provided for us as an intermediary node", + err_code: 0x4000 | 22, + err_data: Vec::new(), + }), + }; + + Ok(PendingHTLCInfo { + routing: PendingHTLCRouting::Forward { + onion_packet: outgoing_packet, + short_channel_id, + }, + payment_hash: msg.payment_hash, + incoming_shared_secret: shared_secret, + incoming_amt_msat: Some(msg.amount_msat), + outgoing_amt_msat: amt_to_forward, + outgoing_cltv_value, + skimmed_fee_msat: None, + }) + } + fn construct_recv_pending_htlc_info( - &self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32], payment_hash: PaymentHash, + &self, hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool, counterparty_skimmed_fee_msat: Option, - ) -> Result { + ) -> Result { + let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data { + msgs::InboundOnionPayload::Receive { + payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, .. 
+ } => + (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata), + _ => + return Err(InboundOnionErr { + err_code: 0x4000|22, + err_data: Vec::new(), + msg: "Got non final data with an HMAC of 0", + }), + }; // final_incorrect_cltv_expiry - if hop_data.outgoing_cltv_value > cltv_expiry { - return Err(ReceiveError { + if outgoing_cltv_value > cltv_expiry { + return Err(InboundOnionErr { msg: "Upstream node set CLTV to less than the CLTV set by the sender", err_code: 18, err_data: cltv_expiry.to_be_bytes().to_vec() @@ -2637,85 +2731,76 @@ where // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). let current_height: u32 = self.best_block.read().unwrap().height(); - if (hop_data.outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { let mut err_data = Vec::with_capacity(12); err_data.extend_from_slice(&amt_msat.to_be_bytes()); err_data.extend_from_slice(¤t_height.to_be_bytes()); - return Err(ReceiveError { + return Err(InboundOnionErr { err_code: 0x4000 | 15, err_data, msg: "The final CLTV expiry is too soon to handle", }); } - if (!allow_underpay && hop_data.amt_to_forward > amt_msat) || - (allow_underpay && hop_data.amt_to_forward > + if (!allow_underpay && onion_amt_msat > amt_msat) || + (allow_underpay && onion_amt_msat > amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0))) { - return Err(ReceiveError { + return Err(InboundOnionErr { err_code: 19, err_data: amt_msat.to_be_bytes().to_vec(), msg: "Upstream node sent less than we were supposed to receive in payment", }); } - let routing = match hop_data.format { - msgs::OnionHopDataFormat::NonFinalNode { .. } => { - return Err(ReceiveError { + let routing = if let Some(payment_preimage) = keysend_preimage { + // We need to check that the sender knows the keysend preimage before processing this + // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X + // could discover the final destination of X, by probing the adjacent nodes on the route + // with a keysend payment of identical payment hash to X and observing the processing + // time discrepancies due to a hash collision with X. + let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + if hashed_preimage != payment_hash { + return Err(InboundOnionErr { err_code: 0x4000|22, err_data: Vec::new(), - msg: "Got non final data with an HMAC of 0", + msg: "Payment preimage didn't match payment hash", }); - }, - msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => { - if let Some(payment_preimage) = keysend_preimage { - // We need to check that the sender knows the keysend preimage before processing this - // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X - // could discover the final destination of X, by probing the adjacent nodes on the route - // with a keysend payment of identical payment hash to X and observing the processing - // time discrepancies due to a hash collision with X. 
- let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); - if hashed_preimage != payment_hash { - return Err(ReceiveError { - err_code: 0x4000|22, - err_data: Vec::new(), - msg: "Payment preimage didn't match payment hash", - }); - } - if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() { - return Err(ReceiveError { - err_code: 0x4000|22, - err_data: Vec::new(), - msg: "We don't support MPP keysend payments", - }); - } - PendingHTLCRouting::ReceiveKeysend { - payment_data, - payment_preimage, - payment_metadata, - incoming_cltv_expiry: hop_data.outgoing_cltv_value, - } - } else if let Some(data) = payment_data { - PendingHTLCRouting::Receive { - payment_data: data, - payment_metadata, - incoming_cltv_expiry: hop_data.outgoing_cltv_value, - phantom_shared_secret, - } - } else { - return Err(ReceiveError { - err_code: 0x4000|0x2000|3, - err_data: Vec::new(), - msg: "We require payment_secrets", - }); - } - }, + } + if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() { + return Err(InboundOnionErr { + err_code: 0x4000|22, + err_data: Vec::new(), + msg: "We don't support MPP keysend payments", + }); + } + PendingHTLCRouting::ReceiveKeysend { + payment_data, + payment_preimage, + payment_metadata, + incoming_cltv_expiry: outgoing_cltv_value, + custom_tlvs, + } + } else if let Some(data) = payment_data { + PendingHTLCRouting::Receive { + payment_data: data, + payment_metadata, + incoming_cltv_expiry: outgoing_cltv_value, + phantom_shared_secret, + custom_tlvs, + } + } else { + return Err(InboundOnionErr { + err_code: 0x4000|0x2000|3, + err_data: Vec::new(), + msg: "We require payment_secrets", + }); }; Ok(PendingHTLCInfo { routing, payment_hash, incoming_shared_secret: shared_secret, incoming_amt_msat: Some(amt_msat), - outgoing_amt_msat: hop_data.amt_to_forward, - outgoing_cltv_value: hop_data.outgoing_cltv_value, + outgoing_amt_msat: onion_amt_msat, + outgoing_cltv_value, skimmed_fee_msat: counterparty_skimmed_fee_msat, }) } @@ -2779,9 +2864,8 @@ where }; let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value, next_packet_pk_opt) = match next_hop { onion_utils::Hop::Forward { - next_hop_data: msgs::OnionHopData { - format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward, - outgoing_cltv_value, + next_hop_data: msgs::InboundOnionPayload::Forward { + short_channel_id, amt_to_forward, outgoing_cltv_value }, .. } => { let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx, @@ -2791,9 +2875,7 @@ where // We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the // inbound channel's state. onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)), - onion_utils::Hop::Forward { - next_hop_data: msgs::OnionHopData { format: msgs::OnionHopDataFormat::FinalNode { .. }, .. }, .. - } => { + onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } => { return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]); } }; @@ -2964,37 +3046,15 @@ where // delay) once they've send us a commitment_signed! 
PendingHTLCStatus::Forward(info) }, - Err(ReceiveError { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) } }, onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => { - debug_assert!(next_packet_pubkey_opt.is_some()); - let outgoing_packet = msgs::OnionPacket { - version: 0, - public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)), - hop_data: new_packet_bytes, - hmac: next_hop_hmac.clone(), - }; - - let short_channel_id = match next_hop_data.format { - msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, - msgs::OnionHopDataFormat::FinalNode { .. } => { - return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); - }, - }; - - PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Forward { - onion_packet: outgoing_packet, - short_channel_id, - }, - payment_hash: msg.payment_hash.clone(), - incoming_shared_secret: shared_secret, - incoming_amt_msat: Some(msg.amount_msat), - outgoing_amt_msat: next_hop_data.amt_to_forward, - outgoing_cltv_value: next_hop_data.outgoing_cltv_value, - skimmed_fee_msat: None, - }) + match self.construct_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac, + new_packet_bytes, shared_secret, next_packet_pubkey_opt) { + Ok(info) => PendingHTLCStatus::Forward(info), + Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + } } } } @@ -3082,10 +3142,17 @@ where #[cfg(test)] pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { let _lck = self.total_consistency_lock.read().unwrap(); - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes) + self.send_payment_along_path(SendAlongPathArgs { + path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, + session_priv_bytes + }) } - fn send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { + fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> { + let SendAlongPathArgs { + path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, + session_priv_bytes + } = args; // The top-level caller should hold the total_consistency_lock read lock. 
debug_assert!(self.total_consistency_lock.try_write().is_err()); @@ -3215,9 +3282,9 @@ where let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments - .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, + &self.entropy_source, &self.node_signer, best_block_height, + |args| self.send_payment_along_path(args)) } /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on @@ -3229,18 +3296,16 @@ where .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, - &self.pending_events, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.pending_events, |args| self.send_payment_along_path(args)) } #[cfg(test)] pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, + keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, + best_block_height, |args| self.send_payment_along_path(args)) } #[cfg(test)] @@ -3294,9 +3359,7 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.send_spontaneous_payment_with_route( route, payment_preimage, recipient_onion, payment_id, &self.entropy_source, - &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.node_signer, best_block_height, |args| self.send_payment_along_path(args)) } /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route @@ -3312,9 +3375,7 @@ where self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion, payment_id, retry_strategy, 
route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.logger, &self.pending_events, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.logger, &self.pending_events, |args| self.send_payment_along_path(args)) } /// Send a payment that is probing the given route for liquidity. We calculate the @@ -3323,9 +3384,9 @@ where pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, + &self.entropy_source, &self.node_signer, best_block_height, + |args| self.send_payment_along_path(args)) } /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a @@ -3350,12 +3411,13 @@ where Some(chan) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; - let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) + let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); let user_id = chan.context.get_user_id(); let shutdown_res = chan.context.force_shutdown(false); - (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None)) + let channel_capacity = chan.context.get_value_satoshis(); + (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity)) } else { unreachable!(); }); match funding_res { Ok((chan, funding_msg)) => (chan, funding_msg), @@ -3523,27 +3585,48 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for channel_id in channel_ids { - if !peer_state.channel_by_id.contains_key(channel_id) { + if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id), }); - } + }; } for channel_id in channel_ids { - let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - let mut config = channel.context.config(); - config.apply(config_update); - if !channel.context.update_config(&config) { + if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) { + let mut config = channel.context.config(); + config.apply(config_update); + if !channel.context.update_config(&config) { + continue; + } + if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + 
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.context.get_counterparty_node_id(), + msg, + }); + } continue; } - if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); - } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.context.get_counterparty_node_id(), - msg, + + let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else { + // This should not be reachable as we've already checked for non-existence in the previous channel_id loop. + debug_assert!(false); + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!", + log_bytes!(*channel_id), counterparty_node_id), }); - } + }; + let mut config = context.config(); + config.apply(config_update); + // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready` + // which would be the case for pending inbound/outbound channels. + context.update_config(&config); } Ok(()) } @@ -3777,7 +3860,7 @@ where outgoing_cltv_value, Some(phantom_shared_secret), false, None) { Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), - Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) + Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, _ => panic!(), @@ -3890,17 +3973,18 @@ where } }) => { let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing { - PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret } => { + PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => { let _legacy_hop_data = Some(payment_data.clone()); - let onion_fields = - RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), payment_metadata }; + let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), + payment_metadata, custom_tlvs }; (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret, onion_fields) }, - PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => { + PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => { let onion_fields = RecipientOnionFields { payment_secret: payment_data.as_ref().map(|data| data.payment_secret), - payment_metadata + payment_metadata, + custom_tlvs, }; (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), payment_data, None, onion_fields) @@ -4130,9 +4214,7 @@ where let best_block_height = self.best_block.read().unwrap().height(); self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, 
best_block_height, - &self.pending_events, &self.logger, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + &self.pending_events, &self.logger, |args| self.send_payment_along_path(args)); for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); @@ -4258,13 +4340,19 @@ where PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = self.process_background_events(); - let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (chan_id, chan) in peer_state.channel_by_id.iter_mut() { + let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + min_mempool_feerate + } else { + normal_feerate + }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } } @@ -4284,6 +4372,7 @@ where /// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs /// with the current [`ChannelConfig`]. /// * Removing peers which have disconnected but and no longer have any channels. + /// * Force-closing and removing channels which have not completed establishment in a timely manner. /// /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate /// estimate fetches. 
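
The fee-update hunks above now pick a per-channel target: channels whose type supports anchors_zero_fee_htlc_tx are updated toward the new ConfirmationTarget::MempoolMinimum estimate, while all other channels keep using ConfirmationTarget::Normal. A minimal FeeEstimator sketch that serves both targets follows; StaticFeeEstimator and the concrete rates are illustrative placeholders, and the match assumes the 0.0.116-era ConfirmationTarget variants.

use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

struct StaticFeeEstimator;

impl FeeEstimator for StaticFeeEstimator {
    fn get_est_sat_per_1000_weight(&self, target: ConfirmationTarget) -> u32 {
        match target {
            // Used by the hunks above when updating fees on anchor-outputs channels.
            ConfirmationTarget::MempoolMinimum => 253,
            ConfirmationTarget::Background => 1_000,
            ConfirmationTarget::Normal => 2_000,
            ConfirmationTarget::HighPriority => 5_000,
        }
    }
}
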
@@ -4294,7 +4383,8 @@ where PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = self.process_background_events(); - let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); @@ -4307,6 +4397,11 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; let counterparty_node_id = *counterparty_node_id; peer_state.channel_by_id.retain(|chan_id, chan| { + let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + min_mempool_feerate + } else { + normal_feerate + }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -4372,6 +4467,26 @@ where true }); + + let process_unfunded_channel_tick = | + chan_id: &[u8; 32], + chan_context: &mut ChannelContext<::Signer>, + unfunded_chan_context: &mut UnfundedChannelContext, + | { + chan_context.maybe_expire_prev_config(); + if unfunded_chan_context.should_expire_unfunded_channel() { + log_error!(self.logger, "Force-closing pending outbound channel {} for not establishing in a timely manner", log_bytes!(&chan_id[..])); + update_maps_on_chan_removal!(self, &chan_context); + self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed); + self.finish_force_close_channel(chan_context.force_shutdown(false)); + false + } else { + true + } + }; + peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context)); + peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context)); + if peer_state.ok_to_remove(true) { pending_peers_awaiting_removal.push(counterparty_node_id); } @@ -4494,12 +4609,19 @@ where /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`]. fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason { match failure_code { - FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16), - FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16), + FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code.into()), + FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()), FailureCode::IncorrectOrUnknownPaymentDetails => { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); - HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data) + HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data) + }, + FailureCode::InvalidOnionPayload(data) => { + let fail_data = match data { + Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(), + None => Vec::new(), + }; + HTLCFailReason::reason(failure_code.into(), fail_data) } } } @@ -4646,13 +4768,35 @@ where /// event matches your expectation. 
If you fail to do so and call this method, you may provide /// the sender "proof-of-payment" when they did not fulfill the full expected payment. /// + /// This function will fail the payment if it has custom TLVs with even type numbers, as we + /// will assume they are unknown. If you intend to accept even custom TLVs, you should use + /// [`claim_funds_with_known_custom_tlvs`]. + /// /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable /// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline /// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed /// [`process_pending_events`]: EventsProvider::process_pending_events /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash + /// [`claim_funds_with_known_custom_tlvs`]: Self::claim_funds_with_known_custom_tlvs pub fn claim_funds(&self, payment_preimage: PaymentPreimage) { + self.claim_payment_internal(payment_preimage, false); + } + + /// This is a variant of [`claim_funds`] that allows accepting a payment with custom TLVs with + /// even type numbers. + /// + /// # Note + /// + /// You MUST check you've understood all even TLVs before using this to + /// claim, otherwise you may unintentionally agree to some protocol you do not understand. + /// + /// [`claim_funds`]: Self::claim_funds + pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) { + self.claim_payment_internal(payment_preimage, true); + } + + fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -4679,6 +4823,23 @@ where log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", log_bytes!(payment_hash.0)); } + + if let Some(RecipientOnionFields { ref custom_tlvs, .. 
}) = payment.onion_fields { + if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) { + log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}", + log_bytes!(payment_hash.0), log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0))); + claimable_payments.pending_claiming_payments.remove(&payment_hash); + mem::drop(claimable_payments); + for htlc in payment.htlcs { + let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc); + let source = HTLCSource::PreviousHopData(htlc.prev_hop); + let receiver = HTLCDestination::FailedPayment { payment_hash }; + self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); + } + return; + } + } + payment.htlcs } else { return; } }; @@ -5340,7 +5501,7 @@ where let user_id = inbound_chan.context.get_user_id(); let shutdown_res = inbound_chan.context.force_shutdown(false); return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err), - msg.temporary_channel_id, user_id, shutdown_res, None)); + msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis())); }, } }, @@ -5482,38 +5643,50 @@ where })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - - if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", - log_bytes!(msg.channel_id), - if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); - } + // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per + // https://github.com/lightningdevkit/rust-lightning/issues/2422 + if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) { + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); + let mut chan = remove_channel!(self, chan_entry); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + return Ok(()); + } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) { + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); + let mut chan = remove_channel!(self, chan_entry); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + return Ok(()); + } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) { + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } - let funding_txo_opt = chan_entry.get().context.get_funding_txo(); - let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, - 
chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); - dropped_htlcs = htlcs; + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); + let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, + chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); + dropped_htlcs = htlcs; - if let Some(msg) = shutdown { - // We can send the `shutdown` message before updating the `ChannelMonitor` - // here as we don't need the monitor update to complete until we send a - // `shutdown_signed`, which we'll delay if we're pending a monitor update. - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg, - }); - } + if let Some(msg) = shutdown { + // We can send the `shutdown` message before updating the `ChannelMonitor` + // here as we don't need the monitor update to complete until we send a + // `shutdown_signed`, which we'll delay if we're pending a monitor update. + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update_opt { - break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); - } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update_opt { + break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); + } + break Ok(()); + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; for htlc_source in dropped_htlcs.drain(..) { @@ -6928,13 +7101,13 @@ where provided_node_features(&self.default_configuration) } - /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by + /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. 
#[cfg(any(feature = "_test_utils", test))] - pub fn invoice_features(&self) -> InvoiceFeatures { + pub fn invoice_features(&self) -> Bolt11InvoiceFeatures { provided_invoice_features(&self.default_configuration) } @@ -7214,37 +7387,20 @@ where log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); let per_peer_state = self.per_peer_state.read().unwrap(); - for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.retain(|_, chan| { - let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id { - if !chan.context.have_received_message() { - // If we created this (outbound) channel while we were disconnected from the - // peer we probably failed to send the open_channel message, which is now - // lost. We can't have had anything pending related to this channel, so we just - // drop it. - false - } else { - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.context.get_counterparty_node_id(), - msg: chan.get_channel_reestablish(&self.logger), - }); - true - } - } else { true }; - if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id { - if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { - if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { - pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { - node_id: *counterparty_node_id, - msg, update_msg, - }); - } - } - } - retain + + // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted + // (so won't be recovered after a crash) we don't need to bother closing unfunded channels and + // clearing their maps here. Instead we can just send queue channel_reestablish messages for + // channels in the channel_by_id map. + peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| { + pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { + node_id: chan.context.get_counterparty_node_id(), + msg: chan.get_channel_reestablish(&self.logger), + }); }); } //TODO: Also re-broadcast announcement_signatures @@ -7278,7 +7434,7 @@ where let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) { - if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { + if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: *counterparty_node_id, msg, @@ -7363,16 +7519,18 @@ where /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by /// [`ChannelManager`]. 
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures { - provided_init_features(config).to_context() + let mut node_features = provided_init_features(config).to_context(); + node_features.set_keysend_optional(); + node_features } -/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by +/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] -pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures { +pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures { provided_init_features(config).to_context() } @@ -7559,12 +7717,14 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, phantom_shared_secret, option), (2, incoming_cltv_expiry, required), (3, payment_metadata, option), + (5, custom_tlvs, optional_vec), }, (2, ReceiveKeysend) => { (0, payment_preimage, required), (2, incoming_cltv_expiry, required), (3, payment_metadata, option), (4, payment_data, option), // Added in 0.0.116 + (5, custom_tlvs, optional_vec), }, ;); @@ -8291,7 +8451,9 @@ where channel_closures.push_back((events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::OutdatedChannelManager + reason: ClosureReason::OutdatedChannelManager, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.context.get_value_satoshis()), }, None)); for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { let mut found_htlc = false; @@ -8343,6 +8505,8 @@ where channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), reason: ClosureReason::DisconnectedPeer, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.context.get_value_satoshis()), }, None)); } else { log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id())); @@ -8671,6 +8835,7 @@ where payment_secret: None, // only used for retries, and we'll never retry on startup payment_metadata: None, // only used for retries, and we'll never retry on startup keysend_preimage: None, // only used for retries, and we'll never retry on startup + custom_tlvs: Vec::new(), // only used for retries, and we'll never retry on startup pending_amt_msat: path_amt, pending_fee_msat: Some(path_fee), total_msat: path_amt, @@ -9558,7 +9723,7 @@ mod tests { nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); { // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been @@ -9721,8 +9886,8 @@ mod tests { } let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[0], 1, 
ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000); } fn check_not_connected_to_peer_error(res_err: Result, expected_public_key: PublicKey) { @@ -10005,20 +10170,19 @@ mod tests { let node = create_network(1, &node_cfg, &node_chanmgr); let sender_intended_amt_msat = 100; let extra_fee_msat = 10; - let hop_data = msgs::OnionHopData { - amt_to_forward: 100, + let hop_data = msgs::InboundOnionPayload::Receive { + amt_msat: 100, outgoing_cltv_value: 42, - format: msgs::OnionHopDataFormat::FinalNode { - keysend_preimage: None, - payment_metadata: None, - payment_data: Some(msgs::FinalOnionHopData { - payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, - }), - } + payment_metadata: None, + keysend_preimage: None, + payment_data: Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, + }), + custom_tlvs: Vec::new(), }; // Check that if the amount we received + the penultimate hop extra fee is less than the sender // intended amount, we fail the payment. - if let Err(crate::ln::channelmanager::ReceiveError { err_code, .. }) = + if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) = node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat)) { @@ -10026,16 +10190,15 @@ mod tests { } else { panic!(); } // If amt_received + extra_fee is equal to the sender intended amount, we're fine. - let hop_data = msgs::OnionHopData { // This is the same hop_data as above, OnionHopData doesn't implement Clone - amt_to_forward: 100, + let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone + amt_msat: 100, outgoing_cltv_value: 42, - format: msgs::OnionHopDataFormat::FinalNode { - keysend_preimage: None, - payment_metadata: None, - payment_data: Some(msgs::FinalOnionHopData { - payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, - }), - } + payment_metadata: None, + keysend_preimage: None, + payment_data: Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, + }), + custom_tlvs: Vec::new(), }; assert!(node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok()); @@ -10117,7 +10280,7 @@ mod tests { let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx()); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -10174,6 +10337,25 @@ mod tests { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("expected BroadcastChannelUpdate event"), } + + // If we provide a channel_id not associated with the peer, we should get an error and no updates + // should be applied to ensure update atomicity as specified in the API docs. 
+ let bad_channel_id = [10; 32]; + let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths; + let new_fee = current_fee + 100; + assert!( + matches!( + nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate { + forwarding_fee_proportional_millionths: Some(new_fee), + ..Default::default() + }), + Err(APIError::ChannelUnavailable { err: _ }), + ) + ); + // Check that the fee hasn't changed for the channel that exists. + assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0); } }
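
As the claim_funds documentation added above notes, claim_funds now fails any payment carrying custom TLVs with unknown even type numbers, and claim_funds_with_known_custom_tlvs is the explicit opt-in for accepting them. A self-contained sketch of that even/odd check, matching the typ % 2 == 0 test used in claim_payment_internal; the helper name requires_custom_tlv_opt_in is illustrative and not part of the patch:

fn requires_custom_tlv_opt_in(custom_tlvs: &[(u64, Vec<u8>)]) -> bool {
    // Even custom TLV types must be understood by the receiver, so plain
    // claim_funds() rejects them; odd types may safely be ignored.
    custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0)
}

fn main() {
    // Odd-typed custom TLV: claimable with plain claim_funds().
    assert!(!requires_custom_tlv_opt_in(&[(65537, vec![1, 2, 3])]));
    // Even-typed custom TLV: requires claim_funds_with_known_custom_tlvs().
    assert!(requires_custom_tlv_opt_in(&[(65536, vec![1, 2, 3])]));
}

This mirrors the BOLT "it's OK to be odd" rule: even TLV records must be understood by the reader, while odd records are optional.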