X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=961f25cc664ad3425d39dcf5e099b66c44eb4949;hb=131560e08fa4f66b8ce9302cde637f87602c86b0;hp=1fa5bd4d519f3bc49e566c7f055b4b46f1a04c0f;hpb=6ebb6d182e9903d5bcdba1473350e218cf6587a7;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1fa5bd4d..961f25cc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,12 +40,12 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel}; +use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] -use crate::ln::features::InvoiceFeatures; +use crate::ln::features::Bolt11InvoiceFeatures; use crate::routing::gossip::NetworkGraph; -use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router}; +use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router}; use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use crate::ln::msgs; use crate::ln::onion_utils; @@ -53,7 +53,7 @@ use crate::ln::onion_utils::HTLCFailReason; use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError}; #[cfg(test)] use crate::ln::outbound_payment; -use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment}; +use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs}; use crate::ln::wire::Encode; use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner}; use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate}; @@ -110,6 +110,8 @@ pub(super) enum PendingHTLCRouting { payment_metadata: Option>, incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed phantom_shared_secret: Option<[u8; 32]>, + /// See [`RecipientOnionFields::custom_tlvs`] for more info. + custom_tlvs: Vec<(u64, Vec)>, }, ReceiveKeysend { /// This was added in 0.0.116 and will break deserialization on downgrades. @@ -117,6 +119,8 @@ pub(super) enum PendingHTLCRouting { payment_preimage: PaymentPreimage, payment_metadata: Option>, incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed + /// See [`RecipientOnionFields::custom_tlvs`] for more info. + custom_tlvs: Vec<(u64, Vec)>, }, } @@ -341,7 +345,7 @@ impl HTLCSource { } } -struct ReceiveError { +struct InboundOnionErr { err_code: u16, err_data: Vec, msg: &'static str, @@ -355,15 +359,32 @@ struct ReceiveError { pub enum FailureCode { /// We had a temporary error processing the payment. 
Useful if no other error codes fit /// and you want to indicate that the payer may want to retry. - TemporaryNodeFailure = 0x2000 | 2, + TemporaryNodeFailure, /// We have a required feature which was not in this onion. For example, you may require /// some additional metadata that was not provided with this payment. - RequiredNodeFeatureMissing = 0x4000 | 0x2000 | 3, + RequiredNodeFeatureMissing, /// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of /// the HTLC is too close to the current block height for safe handling. /// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is /// equivalent to calling [`ChannelManager::fail_htlc_backwards`]. - IncorrectOrUnknownPaymentDetails = 0x4000 | 15, + IncorrectOrUnknownPaymentDetails, + /// We failed to process the payload after the onion was decrypted. You may wish to + /// use this when receiving custom HTLC TLVs with even type numbers that you don't recognize. + /// + /// If available, the tuple data may include the type number and byte offset in the + /// decrypted byte stream where the failure occurred. + InvalidOnionPayload(Option<(u64, u16)>), +} + +impl Into for FailureCode { + fn into(self) -> u16 { + match self { + FailureCode::TemporaryNodeFailure => 0x2000 | 2, + FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3, + FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15, + FailureCode::InvalidOnionPayload(_) => 0x4000 | 22, + } + } } /// Error type returned across the peer_state mutex boundary. When an Err is generated for a @@ -376,6 +397,7 @@ struct MsgHandleErrInternal { err: msgs::LightningError, chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, + channel_capacity: Option, } impl MsgHandleErrInternal { #[inline] @@ -392,14 +414,15 @@ impl MsgHandleErrInternal { }, chan_id: None, shutdown_finish: None, + channel_capacity: None, } } #[inline] fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, chan_id: None, shutdown_finish: None } + Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None } } #[inline] - fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option) -> Self { + fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option, channel_capacity: u64) -> Self { Self { err: LightningError { err: err.clone(), @@ -412,6 +435,7 @@ impl MsgHandleErrInternal { }, chan_id: Some((channel_id, user_channel_id)), shutdown_finish: Some((shutdown_res, channel_update)), + channel_capacity: Some(channel_capacity) } } #[inline] @@ -444,6 +468,7 @@ impl MsgHandleErrInternal { }, chan_id: None, shutdown_finish: None, + channel_capacity: None, } } } @@ -507,19 +532,19 @@ struct ClaimablePayments { /// running normally, and specifically must be processed before any other non-background /// [`ChannelMonitorUpdate`]s are applied. enum BackgroundEvent { - /// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from - /// [`Self::MonitorUpdateRegeneratedOnStartup`] as the maybe-non-closing variant needs a public - /// key to handle channel resumption, whereas if the channel has been force-closed we do not - /// need the counterparty node_id. + /// Handle a ChannelMonitorUpdate which closes the channel or for an already-closed channel. 
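The explicit discriminants on `FailureCode` are replaced by the `Into<u16>` impl above so that the new `InvalidOnionPayload` variant can carry optional (type, offset) data while still mapping to the same BOLT 4 codes. A minimal sketch of the resulting values, assuming `FailureCode` is used via `lightning::ln::channelmanager` (hypothetical assertion-only helper):

    use lightning::ln::channelmanager::FailureCode;

    // Hypothetical check that the `Into<u16>` impl yields the BOLT 4 codes the old explicit
    // discriminants carried, plus PERM|22 for the new variant.
    fn failure_codes_match_bolt4() {
        let temporary: u16 = FailureCode::TemporaryNodeFailure.into();
        assert_eq!(temporary, 0x2000 | 2);             // NODE|2, temporary_node_failure
        let missing: u16 = FailureCode::RequiredNodeFeatureMissing.into();
        assert_eq!(missing, 0x4000 | 0x2000 | 3);      // PERM|NODE|3, required_node_feature_missing
        let unknown: u16 = FailureCode::IncorrectOrUnknownPaymentDetails.into();
        assert_eq!(unknown, 0x4000 | 15);              // PERM|15, incorrect_or_unknown_payment_details
        let bad_payload: u16 = FailureCode::InvalidOnionPayload(Some((65537, 4))).into();
        assert_eq!(bad_payload, 0x4000 | 22);          // PERM|22, invalid_onion_payload
    }

Callers rarely need the raw codes: passing `FailureCode::InvalidOnionPayload(None)` to `ChannelManager::fail_htlc_backwards_with_reason` covers the unknown-even-TLV rejection case directly.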
+ /// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as the + /// maybe-non-closing variant needs a public key to handle channel resumption, whereas if the + /// channel has been force-closed we do not need the counterparty node_id. /// /// Note that any such events are lost on shutdown, so in general they must be updates which /// are regenerated on startup. - ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)), + ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)), /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the /// channel to continue normal operation. /// /// In general this should be used rather than - /// [`Self::ClosingMonitorUpdateRegeneratedOnStartup`], however in cases where the + /// [`Self::ClosedMonitorUpdateRegeneratedOnStartup`], however in cases where the /// `counterparty_node_id` is not available as the channel has closed from a [`ChannelMonitor`] /// error the other variant is acceptable. /// @@ -530,6 +555,13 @@ enum BackgroundEvent { funding_txo: OutPoint, update: ChannelMonitorUpdate }, + /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have + /// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending + /// on a channel. + MonitorUpdatesComplete { + counterparty_node_id: PublicKey, + channel_id: [u8; 32], + }, } #[derive(Debug)] @@ -678,7 +710,7 @@ impl PeerState { && self.in_flight_monitor_updates.is_empty() } - // Returns a count of all channels we have with this peer, including pending channels. + // Returns a count of all channels we have with this peer, including unfunded channels. fn total_channel_count(&self) -> usize { self.channel_by_id.len() + self.outbound_v1_channel_by_id.len() + @@ -1114,7 +1146,6 @@ where /// Notifier the lock contains sends out a notification when the lock is released. total_consistency_lock: RwLock<()>, - #[cfg(debug_assertions)] background_events_processed_since_startup: AtomicBool, persistence_notifier: Notifier, @@ -1653,7 +1684,7 @@ macro_rules! handle_error { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => { + Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => { let mut msg_events = Vec::with_capacity(2); if let Some((shutdown_res, update_option)) = shutdown_finish { @@ -1666,7 +1697,9 @@ macro_rules! handle_error { if let Some((channel_id, user_channel_id)) = chan_id { $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed { channel_id, user_channel_id, - reason: ClosureReason::ProcessingError { err: err.err.clone() } + reason: ClosureReason::ProcessingError { err: err.err.clone() }, + counterparty_node_id: Some($counterparty_node_id), + channel_capacity_sats: channel_capacity, }, None)); } } @@ -1739,20 +1772,20 @@ macro_rules! 
convert_chan_err { update_maps_on_chan_removal!($self, &$channel.context); let shutdown_res = $channel.context.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), - shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) + shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis())) }, } }; - ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => { + ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => { match $err { - // We should only ever have `ChannelError::Close` when prefunded channels error. + // We should only ever have `ChannelError::Close` when unfunded channels error. // In any case, just close the channel. ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => { - log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, &$channel_context); let shutdown_res = $channel_context.force_shutdown(false); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(), - shutdown_res, None)) + shutdown_res, None, $channel_context.get_value_satoshis())) }, } } @@ -1778,7 +1811,7 @@ macro_rules! try_v1_outbound_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED); if drop { $entry.remove_entry(); } @@ -1917,9 +1950,7 @@ macro_rules! handle_new_monitor_update { // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in // any case so that it won't deadlock. debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread); - #[cfg(debug_assertions)] { - debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire)); - } + debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire)); match $update_res { ChannelMonitorUpdateStatus::InProgress => { log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", @@ -1933,7 +1964,7 @@ macro_rules! handle_new_monitor_update { let res = Err(MsgHandleErrInternal::from_finish_shutdown( "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), $chan.context.get_user_id(), $chan.context.force_shutdown(false), - $self.get_channel_update_for_broadcast(&$chan).ok())); + $self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis())); $remove; res }, @@ -2023,6 +2054,8 @@ macro_rules! process_events_body { let mut pending_events = $self.pending_events.lock().unwrap(); pending_events.drain(..num_events); processed_all_events = pending_events.is_empty(); + // Note that `push_pending_forwards_ev` relies on `pending_events_processor` being + // updated here with the `pending_events` lock acquired. 
$self.pending_events_processor.store(false, Ordering::Release); } @@ -2111,7 +2144,6 @@ where pending_events_processor: AtomicBool::new(false), pending_background_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), - #[cfg(debug_assertions)] background_events_processed_since_startup: AtomicBool::new(false), persistence_notifier: Notifier::new(), @@ -2241,6 +2273,7 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + // Only `Channels` in the channel_by_id map can be considered funded. for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, peer_state.latest_features.clone(), &self.fee_estimator); @@ -2309,11 +2342,15 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let features = &peer_state.latest_features; + let chan_context_to_details = |context| { + ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator) + }; return peer_state.channel_by_id .iter() - .map(|(_, channel)| - ChannelDetails::from_channel_context(&channel.context, best_block_height, - features.clone(), &self.fee_estimator)) + .map(|(_, channel)| &channel.context) + .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .map(chan_context_to_details) .collect(); } vec![] @@ -2361,7 +2398,9 @@ where pending_events_lock.push_back((events::Event::ChannelClosed { channel_id: context.channel_id(), user_channel_id: context.get_user_id(), - reason: closure_reason + reason: closure_reason, + counterparty_node_id: Some(context.get_counterparty_node_id()), + channel_capacity_sats: Some(context.get_value_satoshis()), }, None)); } @@ -2370,48 +2409,58 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let per_peer_state = self.per_peer_state.read().unwrap(); + { + let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - let funding_txo_opt = chan_entry.get().context.get_funding_txo(); - let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() - .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; - failed_htlcs = htlcs; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; - // We can send the `shutdown` message before updating the `ChannelMonitor` - // here as we don't need the monitor update to complete until we send a - // `shutdown_signed`, which we'll delay if we're pending a monitor update. 
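`Event::ChannelClosed` now also reports the counterparty and the channel capacity, as populated in `issue_channel_close_events` above. A hedged sketch of an event consumer reading the new fields; both are `Option`s because events serialized by earlier versions will not carry them:

    use lightning::events::Event;

    fn on_channel_closed(event: Event) {
        if let Event::ChannelClosed { channel_id, reason, counterparty_node_id, channel_capacity_sats, .. } = event {
            // Don't unwrap the new fields unconditionally: older serialized events omit them.
            println!("channel {:?} closed ({:?}); peer: {:?}, capacity: {:?} sats",
                channel_id, reason, counterparty_node_id, channel_capacity_sats);
        }
    }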
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg: shutdown_msg, - }); + match peer_state.channel_by_id.entry(channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); + let their_features = &peer_state.latest_features; + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() + .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; + failed_htlcs = htlcs; - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update_opt.take() { - break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); - } + // We can send the `shutdown` message before updating the `ChannelMonitor` + // here as we don't need the monitor update to complete until we send a + // `shutdown_signed`, which we'll delay if we're pending a monitor update. + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg: shutdown_msg, + }); - if chan_entry.get().is_shutdown() { - let channel = remove_channel!(self, chan_entry); - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update_opt.take() { + break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); } - self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed); - } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) }) + + if chan_entry.get().is_shutdown() { + let channel = remove_channel!(self, chan_entry); + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); + } + self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed); + } + break Ok(()); + }, + hash_map::Entry::Vacant(_) => (), + } } + // If we reach this point, it means that the channel_id either refers to an unfunded channel or + // it does not exist for this peer. Either way, we can attempt to force-close it. + // + // An appropriate error will be returned for non-existence of the channel if that's the case. + return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ()) + // TODO(dunxen): This is still not ideal as we're doing some extra lookups. + // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422 }; for htlc_source in failed_htlcs.drain(..) 
{ @@ -2530,14 +2579,14 @@ where self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); - // Prefunded channel has no update + // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) { log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); - // Prefunded channel has no update + // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else { return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); @@ -2611,14 +2660,64 @@ where } } + fn construct_fwd_pending_htlc_info( + &self, msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32], + new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32], + next_packet_pubkey_opt: Option> + ) -> Result { + debug_assert!(next_packet_pubkey_opt.is_some()); + let outgoing_packet = msgs::OnionPacket { + version: 0, + public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)), + hop_data: new_packet_bytes, + hmac: hop_hmac, + }; + + let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data { + msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } => + (short_channel_id, amt_to_forward, outgoing_cltv_value), + msgs::InboundOnionPayload::Receive { .. } => + return Err(InboundOnionErr { + msg: "Final Node OnionHopData provided for us as an intermediary node", + err_code: 0x4000 | 22, + err_data: Vec::new(), + }), + }; + + Ok(PendingHTLCInfo { + routing: PendingHTLCRouting::Forward { + onion_packet: outgoing_packet, + short_channel_id, + }, + payment_hash: msg.payment_hash, + incoming_shared_secret: shared_secret, + incoming_amt_msat: Some(msg.amount_msat), + outgoing_amt_msat: amt_to_forward, + outgoing_cltv_value, + skimmed_fee_msat: None, + }) + } + fn construct_recv_pending_htlc_info( - &self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32], payment_hash: PaymentHash, + &self, hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool, counterparty_skimmed_fee_msat: Option, - ) -> Result { + ) -> Result { + let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data { + msgs::InboundOnionPayload::Receive { + payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, .. 
+ } => + (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata), + _ => + return Err(InboundOnionErr { + err_code: 0x4000|22, + err_data: Vec::new(), + msg: "Got non final data with an HMAC of 0", + }), + }; // final_incorrect_cltv_expiry - if hop_data.outgoing_cltv_value > cltv_expiry { - return Err(ReceiveError { + if outgoing_cltv_value > cltv_expiry { + return Err(InboundOnionErr { msg: "Upstream node set CLTV to less than the CLTV set by the sender", err_code: 18, err_data: cltv_expiry.to_be_bytes().to_vec() @@ -2632,85 +2731,76 @@ where // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). let current_height: u32 = self.best_block.read().unwrap().height(); - if (hop_data.outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { let mut err_data = Vec::with_capacity(12); err_data.extend_from_slice(&amt_msat.to_be_bytes()); err_data.extend_from_slice(¤t_height.to_be_bytes()); - return Err(ReceiveError { + return Err(InboundOnionErr { err_code: 0x4000 | 15, err_data, msg: "The final CLTV expiry is too soon to handle", }); } - if (!allow_underpay && hop_data.amt_to_forward > amt_msat) || - (allow_underpay && hop_data.amt_to_forward > + if (!allow_underpay && onion_amt_msat > amt_msat) || + (allow_underpay && onion_amt_msat > amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0))) { - return Err(ReceiveError { + return Err(InboundOnionErr { err_code: 19, err_data: amt_msat.to_be_bytes().to_vec(), msg: "Upstream node sent less than we were supposed to receive in payment", }); } - let routing = match hop_data.format { - msgs::OnionHopDataFormat::NonFinalNode { .. } => { - return Err(ReceiveError { + let routing = if let Some(payment_preimage) = keysend_preimage { + // We need to check that the sender knows the keysend preimage before processing this + // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X + // could discover the final destination of X, by probing the adjacent nodes on the route + // with a keysend payment of identical payment hash to X and observing the processing + // time discrepancies due to a hash collision with X. + let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + if hashed_preimage != payment_hash { + return Err(InboundOnionErr { err_code: 0x4000|22, err_data: Vec::new(), - msg: "Got non final data with an HMAC of 0", + msg: "Payment preimage didn't match payment hash", }); - }, - msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => { - if let Some(payment_preimage) = keysend_preimage { - // We need to check that the sender knows the keysend preimage before processing this - // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X - // could discover the final destination of X, by probing the adjacent nodes on the route - // with a keysend payment of identical payment hash to X and observing the processing - // time discrepancies due to a hash collision with X. 
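On the send side, the custom TLVs consumed by this receive path originate from `RecipientOnionFields`. A small sketch of building such fields for a spontaneous payment, assuming the `with_custom_tlvs` builder shipped alongside this support (taken here to validate the set and return a `Result`):

    use lightning::ln::channelmanager::RecipientOnionFields;

    // Hypothetical odd, high-numbered TLV type agreed with the recipient out-of-band.
    const MY_CUSTOM_TLV_TYPE: u64 = 5_482_373_483;

    fn keysend_onion_with_custom_tlv() -> Result<RecipientOnionFields, ()> {
        // Assumed behaviour: `Err(())` is returned for invalid sets, e.g. duplicate type numbers.
        RecipientOnionFields::spontaneous_empty()
            .with_custom_tlvs(vec![(MY_CUSTOM_TLV_TYPE, b"hello".to_vec())])
    }

The resulting fields are passed to the usual `send_payment`/`send_spontaneous_payment` entry points and surface again on the receiver via `Event::PaymentClaimable::onion_fields`.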
- let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); - if hashed_preimage != payment_hash { - return Err(ReceiveError { - err_code: 0x4000|22, - err_data: Vec::new(), - msg: "Payment preimage didn't match payment hash", - }); - } - if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() { - return Err(ReceiveError { - err_code: 0x4000|22, - err_data: Vec::new(), - msg: "We don't support MPP keysend payments", - }); - } - PendingHTLCRouting::ReceiveKeysend { - payment_data, - payment_preimage, - payment_metadata, - incoming_cltv_expiry: hop_data.outgoing_cltv_value, - } - } else if let Some(data) = payment_data { - PendingHTLCRouting::Receive { - payment_data: data, - payment_metadata, - incoming_cltv_expiry: hop_data.outgoing_cltv_value, - phantom_shared_secret, - } - } else { - return Err(ReceiveError { - err_code: 0x4000|0x2000|3, - err_data: Vec::new(), - msg: "We require payment_secrets", - }); - } - }, + } + if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() { + return Err(InboundOnionErr { + err_code: 0x4000|22, + err_data: Vec::new(), + msg: "We don't support MPP keysend payments", + }); + } + PendingHTLCRouting::ReceiveKeysend { + payment_data, + payment_preimage, + payment_metadata, + incoming_cltv_expiry: outgoing_cltv_value, + custom_tlvs, + } + } else if let Some(data) = payment_data { + PendingHTLCRouting::Receive { + payment_data: data, + payment_metadata, + incoming_cltv_expiry: outgoing_cltv_value, + phantom_shared_secret, + custom_tlvs, + } + } else { + return Err(InboundOnionErr { + err_code: 0x4000|0x2000|3, + err_data: Vec::new(), + msg: "We require payment_secrets", + }); }; Ok(PendingHTLCInfo { routing, payment_hash, incoming_shared_secret: shared_secret, incoming_amt_msat: Some(amt_msat), - outgoing_amt_msat: hop_data.amt_to_forward, - outgoing_cltv_value: hop_data.outgoing_cltv_value, + outgoing_amt_msat: onion_amt_msat, + outgoing_cltv_value, skimmed_fee_msat: counterparty_skimmed_fee_msat, }) } @@ -2774,9 +2864,8 @@ where }; let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value, next_packet_pk_opt) = match next_hop { onion_utils::Hop::Forward { - next_hop_data: msgs::OnionHopData { - format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward, - outgoing_cltv_value, + next_hop_data: msgs::InboundOnionPayload::Forward { + short_channel_id, amt_to_forward, outgoing_cltv_value }, .. } => { let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx, @@ -2786,9 +2875,7 @@ where // We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the // inbound channel's state. onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)), - onion_utils::Hop::Forward { - next_hop_data: msgs::OnionHopData { format: msgs::OnionHopDataFormat::FinalNode { .. }, .. }, .. - } => { + onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } => { return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]); } }; @@ -2959,37 +3046,15 @@ where // delay) once they've send us a commitment_signed! 
PendingHTLCStatus::Forward(info) }, - Err(ReceiveError { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) } }, onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => { - debug_assert!(next_packet_pubkey_opt.is_some()); - let outgoing_packet = msgs::OnionPacket { - version: 0, - public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)), - hop_data: new_packet_bytes, - hmac: next_hop_hmac.clone(), - }; - - let short_channel_id = match next_hop_data.format { - msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, - msgs::OnionHopDataFormat::FinalNode { .. } => { - return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); - }, - }; - - PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Forward { - onion_packet: outgoing_packet, - short_channel_id, - }, - payment_hash: msg.payment_hash.clone(), - incoming_shared_secret: shared_secret, - incoming_amt_msat: Some(msg.amount_msat), - outgoing_amt_msat: next_hop_data.amt_to_forward, - outgoing_cltv_value: next_hop_data.outgoing_cltv_value, - skimmed_fee_msat: None, - }) + match self.construct_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac, + new_packet_bytes, shared_secret, next_packet_pubkey_opt) { + Ok(info) => PendingHTLCStatus::Forward(info), + Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + } } } } @@ -3077,10 +3142,17 @@ where #[cfg(test)] pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { let _lck = self.total_consistency_lock.read().unwrap(); - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes) + self.send_payment_along_path(SendAlongPathArgs { + path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, + session_priv_bytes + }) } - fn send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { + fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> { + let SendAlongPathArgs { + path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, + session_priv_bytes + } = args; // The top-level caller should hold the total_consistency_lock read lock. debug_assert!(self.total_consistency_lock.try_write().is_err()); @@ -3200,6 +3272,7 @@ where /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a /// different route unless you intend to pay twice! 
/// + /// [`RouteHop`]: crate::routing::router::RouteHop /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs @@ -3209,9 +3282,9 @@ where let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments - .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, + &self.entropy_source, &self.node_signer, best_block_height, + |args| self.send_payment_along_path(args)) } /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on @@ -3223,18 +3296,16 @@ where .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, - &self.pending_events, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.pending_events, |args| self.send_payment_along_path(args)) } #[cfg(test)] pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, + keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, + best_block_height, |args| self.send_payment_along_path(args)) } #[cfg(test)] @@ -3288,9 +3359,7 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.send_spontaneous_payment_with_route( route, payment_preimage, recipient_onion, payment_id, &self.entropy_source, - &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.node_signer, best_block_height, |args| self.send_payment_along_path(args)) } /// Similar to [`ChannelManager::send_spontaneous_payment`], but will 
automatically find a route @@ -3306,9 +3375,7 @@ where self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.logger, &self.pending_events, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.logger, &self.pending_events, |args| self.send_payment_along_path(args)) } /// Send a payment that is probing the given route for liquidity. We calculate the @@ -3317,9 +3384,9 @@ where pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, + &self.entropy_source, &self.node_signer, best_block_height, + |args| self.send_payment_along_path(args)) } /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a @@ -3344,12 +3411,13 @@ where Some(chan) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; - let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) + let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); let user_id = chan.context.get_user_id(); let shutdown_res = chan.context.force_shutdown(false); - (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None)) + let channel_capacity = chan.context.get_value_satoshis(); + (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity)) } else { unreachable!(); }); match funding_res { Ok((chan, funding_msg)) => (chan, funding_msg), @@ -3517,27 +3585,48 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for channel_id in channel_ids { - if !peer_state.channel_by_id.contains_key(channel_id) { + if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id), }); - } + }; } for channel_id in channel_ids { - let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - let mut config = channel.context.config(); - config.apply(config_update); - if !channel.context.update_config(&config) { + if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) { + let mut config = channel.context.config(); + config.apply(config_update); + if !channel.context.update_config(&config) { + continue; + } + if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { + 
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.context.get_counterparty_node_id(), + msg, + }); + } continue; } - if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); - } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.context.get_counterparty_node_id(), - msg, + + let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else { + // This should not be reachable as we've already checked for non-existence in the previous channel_id loop. + debug_assert!(false); + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!", + log_bytes!(*channel_id), counterparty_node_id), }); - } + }; + let mut config = context.config(); + config.apply(config_update); + // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready` + // which would be the case for pending inbound/outbound channels. + context.update_config(&config); } Ok(()) } @@ -3771,7 +3860,7 @@ where outgoing_cltv_value, Some(phantom_shared_secret), false, None) { Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), - Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) + Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, _ => panic!(), @@ -3884,17 +3973,18 @@ where } }) => { let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing { - PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret } => { + PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => { let _legacy_hop_data = Some(payment_data.clone()); - let onion_fields = - RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), payment_metadata }; + let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), + payment_metadata, custom_tlvs }; (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret, onion_fields) }, - PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => { + PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => { let onion_fields = RecipientOnionFields { payment_secret: payment_data.as_ref().map(|data| data.payment_secret), - payment_metadata + payment_metadata, + custom_tlvs, }; (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), payment_data, None, onion_fields) @@ -4124,9 +4214,7 @@ where let best_block_height = self.best_block.read().unwrap().height(); 
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.pending_events, &self.logger, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + &self.pending_events, &self.logger, |args| self.send_payment_along_path(args)); for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); @@ -4150,7 +4238,6 @@ where fn process_background_events(&self) -> NotifyOption { debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread); - #[cfg(debug_assertions)] self.background_events_processed_since_startup.store(true, Ordering::Release); let mut background_events = Vec::new(); @@ -4161,7 +4248,7 @@ where for event in background_events.drain(..) { match event { - BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => { + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. let _ = self.chain_monitor.update_channel(funding_txo, &update); @@ -4196,6 +4283,22 @@ where } let _ = handle_error!(self, res, counterparty_node_id); }, + BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) { + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + } else { + let update_actions = peer_state.monitor_update_blocked_actions + .remove(&channel_id).unwrap_or(Vec::new()); + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(update_actions); + } + } + }, } } NotifyOption::DoPersist @@ -4237,13 +4340,19 @@ where PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = self.process_background_events(); - let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (chan_id, chan) in peer_state.channel_by_id.iter_mut() { + let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + min_mempool_feerate + } else { + normal_feerate + }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } } @@ -4263,6 +4372,7 @@ where /// * Expiring a channel's previous 
[`ChannelConfig`] if necessary to only allow forwarding HTLCs /// with the current [`ChannelConfig`]. /// * Removing peers which have disconnected but and no longer have any channels. + /// * Force-closing and removing channels which have not completed establishment in a timely manner. /// /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate /// estimate fetches. @@ -4273,7 +4383,8 @@ where PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = self.process_background_events(); - let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); @@ -4286,6 +4397,11 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; let counterparty_node_id = *counterparty_node_id; peer_state.channel_by_id.retain(|chan_id, chan| { + let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + min_mempool_feerate + } else { + normal_feerate + }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -4351,6 +4467,26 @@ where true }); + + let process_unfunded_channel_tick = | + chan_id: &[u8; 32], + chan_context: &mut ChannelContext<::Signer>, + unfunded_chan_context: &mut UnfundedChannelContext, + | { + chan_context.maybe_expire_prev_config(); + if unfunded_chan_context.should_expire_unfunded_channel() { + log_error!(self.logger, "Force-closing pending outbound channel {} for not establishing in a timely manner", log_bytes!(&chan_id[..])); + update_maps_on_chan_removal!(self, &chan_context); + self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed); + self.finish_force_close_channel(chan_context.force_shutdown(false)); + false + } else { + true + } + }; + peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context)); + peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context)); + if peer_state.ok_to_remove(true) { pending_peers_awaiting_removal.push(counterparty_node_id); } @@ -4473,12 +4609,19 @@ where /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`]. 
fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason { match failure_code { - FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16), - FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16), + FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code.into()), + FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()), FailureCode::IncorrectOrUnknownPaymentDetails => { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); - HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data) + HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data) + }, + FailureCode::InvalidOnionPayload(data) => { + let fail_data = match data { + Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(), + None => Vec::new(), + }; + HTLCFailReason::reason(failure_code.into(), fail_data) } } } @@ -4625,13 +4768,35 @@ where /// event matches your expectation. If you fail to do so and call this method, you may provide /// the sender "proof-of-payment" when they did not fulfill the full expected payment. /// + /// This function will fail the payment if it has custom TLVs with even type numbers, as we + /// will assume they are unknown. If you intend to accept even custom TLVs, you should use + /// [`claim_funds_with_known_custom_tlvs`]. + /// /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable /// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline /// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed /// [`process_pending_events`]: EventsProvider::process_pending_events /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash + /// [`claim_funds_with_known_custom_tlvs`]: Self::claim_funds_with_known_custom_tlvs pub fn claim_funds(&self, payment_preimage: PaymentPreimage) { + self.claim_payment_internal(payment_preimage, false); + } + + /// This is a variant of [`claim_funds`] that allows accepting a payment with custom TLVs with + /// even type numbers. + /// + /// # Note + /// + /// You MUST check you've understood all even TLVs before using this to + /// claim, otherwise you may unintentionally agree to some protocol you do not understand. + /// + /// [`claim_funds`]: Self::claim_funds + pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) { + self.claim_payment_internal(payment_preimage, true); + } + + fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -4658,6 +4823,23 @@ where log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", log_bytes!(payment_hash.0)); } + + if let Some(RecipientOnionFields { ref custom_tlvs, .. 
}) = payment.onion_fields { + if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) { + log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}", + log_bytes!(payment_hash.0), log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0))); + claimable_payments.pending_claiming_payments.remove(&payment_hash); + mem::drop(claimable_payments); + for htlc in payment.htlcs { + let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc); + let source = HTLCSource::PreviousHopData(htlc.prev_hop); + let receiver = HTLCDestination::FailedPayment { payment_hash }; + self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); + } + return; + } + } + payment.htlcs } else { return; } }; @@ -4741,6 +4923,11 @@ where -> Result<(), (PublicKey, MsgHandleErrInternal)> { //TODO: Delay the claimed_funds relaying just like we do outbound relay! + // If we haven't yet run background events assume we're still deserializing and shouldn't + // actually pass `ChannelMonitorUpdate`s to users yet. Instead, queue them up as + // `BackgroundEvent`s. + let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire); + { let per_peer_state = self.per_peer_state.read().unwrap(); let chan_id = prev_hop.outpoint.to_channel_id(); @@ -4767,14 +4954,26 @@ where log_bytes!(chan_id), action); peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); } - let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, - peer_state, per_peer_state, chan); - if let Err(e) = res { - // TODO: This is a *critical* error - we probably updated the outbound edge - // of the HTLC's monitor with a preimage. We should retry this monitor - // update over and over again until morale improves. - log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage); - return Err((counterparty_node_id, e)); + if !during_init { + let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, + peer_state, per_peer_state, chan); + if let Err(e) = res { + // TODO: This is a *critical* error - we probably updated the outbound edge + // of the HTLC's monitor with a preimage. We should retry this monitor + // update over and over again until morale improves. + log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage); + return Err((counterparty_node_id, e)); + } + } else { + // If we're running during init we cannot update a monitor directly - + // they probably haven't actually been loaded yet. Instead, push the + // monitor update as a background event. + self.pending_background_events.lock().unwrap().push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo: prev_hop.outpoint, + update: monitor_update.clone(), + }); } } return Ok(()); @@ -4787,16 +4986,34 @@ where payment_preimage, }], }; - // We update the ChannelMonitor on the backward link, after - // receiving an `update_fulfill_htlc` from the forward link. 
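A receive-side sketch of deciding whether a claimable payment's even custom TLVs are all understood before opting in via `claim_funds_with_known_custom_tlvs`; the `custom_tlvs()` accessor and the `KNOWN_EVEN_TLVS` list are assumptions for illustration:

    use lightning::events::Event;

    // Hypothetical even TLV type numbers this node has agreed, out-of-band, to accept.
    const KNOWN_EVEN_TLVS: &[u64] = &[65536];

    fn safe_to_claim_with_custom_tlvs(event: &Event) -> bool {
        match event {
            Event::PaymentClaimable { onion_fields: Some(fields), .. } => fields.custom_tlvs().iter()
                .filter(|(typ, _)| typ % 2 == 0)
                .all(|(typ, _)| KNOWN_EVEN_TLVS.contains(typ)),
            // No onion fields (or some other event) means there are no even custom TLVs to vet.
            _ => true,
        }
    }

When this returns false, plain `claim_funds` is the safe default; as implemented above it fails the HTLCs back with `invalid_onion_payload`.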
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update); - if update_res != ChannelMonitorUpdateStatus::Completed { - // TODO: This needs to be handled somehow - if we receive a monitor update - // with a preimage we *must* somehow manage to propagate it to the upstream - // channel, or we must have an ability to receive the same event and try - // again on restart. - log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", - payment_preimage, update_res); + + if !during_init { + // We update the ChannelMonitor on the backward link, after + // receiving an `update_fulfill_htlc` from the forward link. + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update); + if update_res != ChannelMonitorUpdateStatus::Completed { + // TODO: This needs to be handled somehow - if we receive a monitor update + // with a preimage we *must* somehow manage to propagate it to the upstream + // channel, or we must have an ability to receive the same event and try + // again on restart. + log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, update_res); + } + } else { + // If we're running during init we cannot update a monitor directly - they probably + // haven't actually been loaded yet. Instead, push the monitor update as a background + // event. + // Note that while it's safe to use `ClosedMonitorUpdateRegeneratedOnStartup` here (the + // channel is already closed) we need to ultimately handle the monitor update + // completion action only after we've completed the monitor update. This is the only + // way to guarantee this update *will* be regenerated on startup (otherwise if this was + // from a forwarded HTLC the downstream preimage may be deleted before we claim + // upstream). Thus, we need to transition to some new `BackgroundEvent` type which will + // complete the monitor update completion action from `completion_action`. + self.pending_background_events.lock().unwrap().push( + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(( + prev_hop.outpoint, preimage_update, + ))); } // Note that we do process the completion action here. This totally could be a // duplicate claim, but we have no way of knowing without interrogating the @@ -4814,6 +5031,8 @@ where fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. 
} => { + debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire), + "We don't support claim_htlc claims during startup - monitors may not be available yet"); self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { @@ -4969,24 +5188,29 @@ where if peer_state_mutex_opt.is_none() { return } peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - let mut channel = { - match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){ - hash_map::Entry::Occupied(chan) => chan, - hash_map::Entry::Vacant(_) => return, - } - }; + let channel = + if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) { + chan + } else { + let update_actions = peer_state.monitor_update_blocked_actions + .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new()); + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(update_actions); + return; + }; let remaining_in_flight = if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) { pending.retain(|upd| upd.update_id > highest_applied_update_id); pending.len() } else { 0 }; log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.", - highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(), + highest_applied_update_id, channel.context.get_latest_monitor_update_id(), remaining_in_flight); - if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id { + if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id { return; } - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel); } /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`]. 
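To make the bookkeeping in `channel_monitor_updated` above easier to follow, here is a minimal sketch of the watermark pattern it uses: once the persister reports the highest `update_id` it has completed, every in-flight update at or below that id is dropped, and completion handling only proceeds when nothing further is outstanding for the channel. The types and function below are illustrative stand-ins, not LDK's actual structures.

use std::collections::HashMap;

/// One queued `ChannelMonitorUpdate`, reduced to the only field this sketch needs.
struct InFlightUpdate { update_id: u64 }

/// Drops every in-flight update the persister has already applied and reports whether the
/// channel is now free of pending monitor updates (i.e. completion handling may run).
fn on_update_applied(
    in_flight: &mut HashMap<[u8; 32], Vec<InFlightUpdate>>,
    channel_id: [u8; 32],
    highest_applied_update_id: u64,
) -> bool {
    let remaining = if let Some(pending) = in_flight.get_mut(&channel_id) {
        pending.retain(|upd| upd.update_id > highest_applied_update_id);
        pending.len()
    } else { 0 };
    remaining == 0
}

fn main() {
    let chan_id = [0u8; 32];
    let mut in_flight = HashMap::new();
    in_flight.insert(chan_id, vec![
        InFlightUpdate { update_id: 1 },
        InFlightUpdate { update_id: 2 },
    ]);
    assert!(!on_update_applied(&mut in_flight, chan_id, 1)); // update 2 is still in flight
    assert!(on_update_applied(&mut in_flight, chan_id, 2));  // everything caught up
}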
@@ -5277,7 +5501,7 @@ where let user_id = inbound_chan.context.get_user_id(); let shutdown_res = inbound_chan.context.force_shutdown(false); return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err), - msg.temporary_channel_id, user_id, shutdown_res, None)); + msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis())); }, } }, @@ -5419,38 +5643,50 @@ where })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - - if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", - log_bytes!(msg.channel_id), - if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); - } + // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per + // https://github.com/lightningdevkit/rust-lightning/issues/2422 + if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) { + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); + let mut chan = remove_channel!(self, chan_entry); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + return Ok(()); + } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) { + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); + let mut chan = remove_channel!(self, chan_entry); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + return Ok(()); + } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) { + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } - let funding_txo_opt = chan_entry.get().context.get_funding_txo(); - let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, - chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); - dropped_htlcs = htlcs; + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); + let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, + chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); + dropped_htlcs = htlcs; - if let Some(msg) = shutdown { - // We can send the `shutdown` message before updating the `ChannelMonitor` - // here as we don't need the monitor update to complete until we send a - // `shutdown_signed`, which we'll delay if we're pending a monitor update. 
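The reworked `shutdown` handler above has to consult three per-peer maps while unfunded V1 channels still live outside `channel_by_id` (the duplication the TODO about a single enum-keyed map refers to). A minimal sketch of that lookup order, using hypothetical `PeerChannels`/`ChannelPhase` placeholders rather than LDK types, might look like this:

use std::collections::HashMap;

/// Which map a channel id was found in; purely illustrative.
#[derive(Debug)]
enum ChannelPhase { UnfundedOutbound, UnfundedInbound, Funded }

struct PeerChannels {
    outbound_v1: HashMap<[u8; 32], ()>,
    inbound_v1: HashMap<[u8; 32], ()>,
    funded: HashMap<[u8; 32], ()>,
}

fn classify(peer: &PeerChannels, channel_id: &[u8; 32]) -> Option<ChannelPhase> {
    if peer.outbound_v1.contains_key(channel_id) {
        Some(ChannelPhase::UnfundedOutbound) // unfunded: just close it, shutdown is unnecessary
    } else if peer.inbound_v1.contains_key(channel_id) {
        Some(ChannelPhase::UnfundedInbound)  // same for the inbound unfunded map
    } else if peer.funded.contains_key(channel_id) {
        Some(ChannelPhase::Funded)           // funded: run the cooperative shutdown path
    } else {
        None // unknown channel: report an error to the peer, close nothing
    }
}

fn main() {
    let mut peer = PeerChannels {
        outbound_v1: HashMap::new(),
        inbound_v1: HashMap::new(),
        funded: HashMap::new(),
    };
    peer.funded.insert([1; 32], ());
    assert!(matches!(classify(&peer, &[1; 32]), Some(ChannelPhase::Funded)));
    assert!(classify(&peer, &[9; 32]).is_none());
}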
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg, - }); - } + if let Some(msg) = shutdown { + // We can send the `shutdown` message before updating the `ChannelMonitor` + // here as we don't need the monitor update to complete until we send a + // `shutdown_signed`, which we'll delay if we're pending a monitor update. + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update_opt { - break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); - } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update_opt { + break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); + } + break Ok(()); + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; for htlc_source in dropped_htlcs.drain(..) { @@ -5733,22 +5969,27 @@ where } } - // We only want to push a PendingHTLCsForwardable event if no others are queued. fn push_pending_forwards_ev(&self) { let mut pending_events = self.pending_events.lock().unwrap(); - let forward_ev_exists = pending_events.iter() - .find(|(ev, _)| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }) - .is_some(); - if !forward_ev_exists { - pending_events.push_back((events::Event::PendingHTLCsForwardable { - time_forwardable: - Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), + let is_processing_events = self.pending_events_processor.load(Ordering::Acquire); + let num_forward_events = pending_events.iter().filter(|(ev, _)| + if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false } + ).count(); + // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing + // events is done in batches and they are not removed until we're done processing each + // batch. Since handling a `PendingHTLCsForwardable` event will call back into the + // `ChannelManager`, we'll still see the original forwarding event not removed. Phantom + // payments will need an additional forwarding event before being claimed to make them look + // real by taking more time. + if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 { + pending_events.push_back((Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), }, None)); } } /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote - /// [`msgs::RevokeAndACK`] should be held for the given channel until some other event + /// [`msgs::RevokeAndACK`] should be held for the given channel until some other action /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of /// the [`ChannelMonitorUpdate`] in question. 
fn raa_monitor_updates_held(&self, @@ -6365,7 +6606,7 @@ where /// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an /// [`Event`] being handled) completes, this should be called to restore the channel to normal /// operation. It will double-check that nothing *else* is also blocking the same channel from - /// making progress and then any blocked [`ChannelMonitorUpdate`]s fly. + /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly. fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option) { let mut errors = Vec::new(); loop { @@ -6860,13 +7101,13 @@ where provided_node_features(&self.default_configuration) } - /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by + /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] - pub fn invoice_features(&self) -> InvoiceFeatures { + pub fn invoice_features(&self) -> Bolt11InvoiceFeatures { provided_invoice_features(&self.default_configuration) } @@ -7146,37 +7387,20 @@ where log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); let per_peer_state = self.per_peer_state.read().unwrap(); - for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.retain(|_, chan| { - let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id { - if !chan.context.have_received_message() { - // If we created this (outbound) channel while we were disconnected from the - // peer we probably failed to send the open_channel message, which is now - // lost. We can't have had anything pending related to this channel, so we just - // drop it. - false - } else { - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.context.get_counterparty_node_id(), - msg: chan.get_channel_reestablish(&self.logger), - }); - true - } - } else { true }; - if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id { - if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { - if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { - pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { - node_id: *counterparty_node_id, - msg, update_msg, - }); - } - } - } - retain + + // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted + // (so won't be recovered after a crash) we don't need to bother closing unfunded channels and + // clearing their maps here. Instead we can just send queue channel_reestablish messages for + // channels in the channel_by_id map. 
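The channel_reestablish hunk above replaces a scan over every peer with a direct lookup of the one counterparty that reconnected, then queues one reestablish message per funded channel. A rough sketch of that shape, with simplified stand-in types rather than LDK's real per-peer state, could be:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

struct PeerState { channel_ids: Vec<ChannelId>, pending_msgs: Vec<String> }

fn queue_reestablish(per_peer: &RwLock<HashMap<NodeId, Mutex<PeerState>>>, counterparty: &NodeId) {
    let per_peer = per_peer.read().unwrap();
    // Look up only the peer that (re)connected instead of scanning every peer's state.
    if let Some(peer_mutex) = per_peer.get(counterparty) {
        let mut peer = peer_mutex.lock().unwrap();
        let msgs: Vec<String> = peer.channel_ids.iter()
            .map(|id| format!("channel_reestablish for channel {:x?}", &id[..4]))
            .collect();
        peer.pending_msgs.extend(msgs);
    }
}

fn main() {
    let per_peer = RwLock::new(HashMap::new());
    let node: NodeId = [2; 33];
    per_peer.write().unwrap().insert(node, Mutex::new(PeerState {
        channel_ids: vec![[7; 32]],
        pending_msgs: Vec::new(),
    }));
    queue_reestablish(&per_peer, &node);
    assert_eq!(per_peer.read().unwrap().get(&node).unwrap().lock().unwrap().pending_msgs.len(), 1);
}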
+ peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| { + pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { + node_id: chan.context.get_counterparty_node_id(), + msg: chan.get_channel_reestablish(&self.logger), + }); }); } //TODO: Also re-broadcast announcement_signatures @@ -7210,7 +7434,7 @@ where let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) { - if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { + if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: *counterparty_node_id, msg, @@ -7295,16 +7519,18 @@ where /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by /// [`ChannelManager`]. pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures { - provided_init_features(config).to_context() + let mut node_features = provided_init_features(config).to_context(); + node_features.set_keysend_optional(); + node_features } -/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by +/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] -pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures { +pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures { provided_init_features(config).to_context() } @@ -7476,7 +7702,7 @@ impl Readable for ChannelDetails { } impl_writeable_tlv_based!(PhantomRouteHints, { - (2, channels, vec_type), + (2, channels, required_vec), (4, phantom_scid, required), (6, real_node_pubkey, required), }); @@ -7491,12 +7717,14 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, phantom_shared_secret, option), (2, incoming_cltv_expiry, required), (3, payment_metadata, option), + (5, custom_tlvs, optional_vec), }, (2, ReceiveKeysend) => { (0, payment_preimage, required), (2, incoming_cltv_expiry, required), (3, payment_metadata, option), (4, payment_data, option), // Added in 0.0.116 + (5, custom_tlvs, optional_vec), }, ;); @@ -7668,7 +7896,7 @@ impl Readable for HTLCSource { 0 => { let mut session_priv: crate::util::ser::RequiredWrapper = crate::util::ser::RequiredWrapper(None); let mut first_hop_htlc_msat: u64 = 0; - let mut path_hops: Option> = Some(Vec::new()); + let mut path_hops = Vec::new(); let mut payment_id = None; let mut payment_params: Option = None; let mut blinded_tail: Option = None; @@ -7676,7 +7904,7 @@ impl Readable for HTLCSource { (0, session_priv, required), (1, payment_id, option), (2, first_hop_htlc_msat, required), - (4, path_hops, vec_type), + (4, path_hops, required_vec), (5, payment_params, (option: ReadableArgs, 0)), (6, blinded_tail, option), }); @@ -7685,7 +7913,7 @@ impl Readable for HTLCSource { // instead. 
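The `payment_id` handling just below is a legacy-default pattern: a field added later (and written under an odd TLV type so older readers skip it) may be absent in data written by an older version, so a deterministic fallback is derived from a field that was always present. A minimal sketch of the idea, with illustrative names only:

/// Illustrative stand-in for LDK's `PaymentId` newtype.
#[derive(Debug, PartialEq)]
struct PaymentId([u8; 32]);

/// Returns the stored payment id, or derives the legacy one from the session key bytes
/// when reading data written before the field existed.
fn payment_id_or_legacy_fallback(read: Option<PaymentId>, session_priv_bytes: [u8; 32]) -> PaymentId {
    read.unwrap_or(PaymentId(session_priv_bytes))
}

fn main() {
    let session_priv_bytes = [7u8; 32];
    // Old data: no payment_id on disk, fall back to the session key bytes.
    assert_eq!(payment_id_or_legacy_fallback(None, session_priv_bytes), PaymentId([7u8; 32]));
    // New data: the explicitly written payment_id wins.
    assert_eq!(
        payment_id_or_legacy_fallback(Some(PaymentId([1u8; 32])), session_priv_bytes),
        PaymentId([1u8; 32]),
    );
}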
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref())); } - let path = Path { hops: path_hops.ok_or(DecodeError::InvalidValue)?, blinded_tail }; + let path = Path { hops: path_hops, blinded_tail }; if path.hops.len() == 0 { return Err(DecodeError::InvalidValue); } @@ -7720,7 +7948,7 @@ impl Writeable for HTLCSource { (1, payment_id_opt, option), (2, first_hop_htlc_msat, required), // 3 was previously used to write a PaymentSecret for the payment. - (4, path.hops, vec_type), + (4, path.hops, required_vec), (5, None::, option), // payment_params in LDK versions prior to 0.0.115 (6, path.blinded_tail, option), }); @@ -7970,7 +8198,7 @@ where (6, monitor_update_blocked_actions_per_peer, option), (7, self.fake_scid_rand_bytes, required), (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option), - (9, htlc_purposes, vec_type), + (9, htlc_purposes, required_vec), (10, in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), @@ -8223,7 +8451,9 @@ where channel_closures.push_back((events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::OutdatedChannelManager + reason: ClosureReason::OutdatedChannelManager, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.context.get_value_satoshis()), }, None)); for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { let mut found_htlc = false; @@ -8275,6 +8505,8 @@ where channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), reason: ClosureReason::DisconnectedPeer, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.context.get_value_satoshis()), }, None)); } else { log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id())); @@ -8294,7 +8526,7 @@ where update_id: CLOSED_CHANNEL_UPDATE_ID, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], }; - close_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update))); + close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update))); } } @@ -8417,7 +8649,7 @@ where (6, monitor_update_blocked_actions_per_peer, option), (7, fake_scid_rand_bytes, option), (8, events_override, option), - (9, claimable_htlc_purposes, vec_type), + (9, claimable_htlc_purposes, optional_vec), (10, in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), @@ -8481,6 +8713,16 @@ where update: update.clone(), }); } + if $chan_in_flight_upds.is_empty() { + // We had some updates to apply, but it turns out they had completed before we + // were serialized, we just weren't notified of that. Thus, we may have to run + // the completion actions for any monitor updates, but otherwise are done. 
+ pending_background_events.push( + BackgroundEvent::MonitorUpdatesComplete { + counterparty_node_id: $counterparty_node_id, + channel_id: $funding_txo.to_channel_id(), + }); + } if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() { log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!"); return Err(DecodeError::InvalidValue); @@ -8549,6 +8791,11 @@ where // Note that we have to do the above replays before we push new monitor updates. pending_background_events.append(&mut close_background_events); + // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we + // should ensure we try them again on the inbound edge. We put them here and do so after we + // have a fully-constructed `ChannelManager` at the end. + let mut pending_claims_to_replay = Vec::new(); + { // If we're tracking pending payments, ensure we haven't lost any by looking at the // ChannelMonitor data for any channels for which we do not have authorative state @@ -8559,7 +8806,8 @@ where // We only rebuild the pending payments map if we were most recently serialized by // 0.0.102+ for (_, monitor) in args.channel_monitors.iter() { - if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { + let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()); + if counterparty_opt.is_none() { for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() { if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source { if path.hops.is_empty() { @@ -8587,6 +8835,7 @@ where payment_secret: None, // only used for retries, and we'll never retry on startup payment_metadata: None, // only used for retries, and we'll never retry on startup keysend_preimage: None, // only used for retries, and we'll never retry on startup + custom_tlvs: Vec::new(), // only used for retries, and we'll never retry on startup pending_amt_msat: path_amt, pending_fee_msat: Some(path_fee), total_msat: path_amt, @@ -8653,6 +8902,33 @@ where } } } + + // Whether the downstream channel was closed or not, try to re-apply any payment + // preimages from it which may be needed in upstream channels for forwarded + // payments. + let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs() + .into_iter() + .filter_map(|(htlc_source, (htlc, preimage_opt))| { + if let HTLCSource::PreviousHopData(_) = htlc_source { + if let Some(payment_preimage) = preimage_opt { + Some((htlc_source, payment_preimage, htlc.amount_msat, + // Check if `counterparty_opt.is_none()` to see if the + // downstream chan is closed (because we don't have a + // channel_id -> peer map entry). + counterparty_opt.is_none(), + monitor.get_funding_txo().0.to_channel_id())) + } else { None } + } else { + // If it was an outbound payment, we've handled it above - if a preimage + // came in and we persisted the `ChannelManager` we either handled it and + // are good to go or the channel force-closed - we don't have to handle the + // channel still live case here. 
+ None + } + }); + for tuple in outbound_claimed_htlcs_iter { + pending_claims_to_replay.push(tuple); + } } } @@ -8840,6 +9116,12 @@ where blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates .entry(blocked_channel_outpoint.to_channel_id()) .or_insert_with(Vec::new).push(blocking_action.clone()); + } else { + // If the channel we were blocking has closed, we don't need to + // worry about it - the blocked monitor update should never have + // been released from the `Channel` object so it can't have + // completed, and if the channel closed there's no reason to bother + // anymore. } } } @@ -8885,7 +9167,6 @@ where pending_events_processor: AtomicBool::new(false), pending_background_events: Mutex::new(pending_background_events), total_consistency_lock: RwLock::new(()), - #[cfg(debug_assertions)] background_events_processed_since_startup: AtomicBool::new(false), persistence_notifier: Notifier::new(), @@ -8904,6 +9185,14 @@ where channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } + for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay { + // We use `downstream_closed` in place of `from_onchain` here just as a guess - we + // don't remember in the `ChannelMonitor` where we got a preimage from, but if the + // channel is closed we just assume that it probably came from an on-chain claim. + channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), + downstream_closed, downstream_chan_id); + } + //TODO: Broadcast channel update for closed channels, but only after we've made a //connection or two. @@ -9434,7 +9723,7 @@ mod tests { nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); { // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been @@ -9597,8 +9886,8 @@ mod tests { } let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000); } fn check_not_connected_to_peer_error(res_err: Result, expected_public_key: PublicKey) { @@ -9881,20 +10170,19 @@ mod tests { let node = create_network(1, &node_cfg, &node_chanmgr); let sender_intended_amt_msat = 100; let extra_fee_msat = 10; - let hop_data = msgs::OnionHopData { - amt_to_forward: 100, + let hop_data = msgs::InboundOnionPayload::Receive { + amt_msat: 100, outgoing_cltv_value: 42, - format: msgs::OnionHopDataFormat::FinalNode { - keysend_preimage: None, - payment_metadata: None, - payment_data: Some(msgs::FinalOnionHopData { - payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, - }), - } + payment_metadata: None, + keysend_preimage: None, + payment_data: Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, + }), + custom_tlvs: Vec::new(), }; // Check that if the amount we received + the 
penultimate hop extra fee is less than the sender // intended amount, we fail the payment. - if let Err(crate::ln::channelmanager::ReceiveError { err_code, .. }) = + if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) = node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat)) { @@ -9902,16 +10190,15 @@ mod tests { } else { panic!(); } // If amt_received + extra_fee is equal to the sender intended amount, we're fine. - let hop_data = msgs::OnionHopData { // This is the same hop_data as above, OnionHopData doesn't implement Clone - amt_to_forward: 100, + let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone + amt_msat: 100, outgoing_cltv_value: 42, - format: msgs::OnionHopDataFormat::FinalNode { - keysend_preimage: None, - payment_metadata: None, - payment_data: Some(msgs::FinalOnionHopData { - payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, - }), - } + payment_metadata: None, + keysend_preimage: None, + payment_data: Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat, + }), + custom_tlvs: Vec::new(), }; assert!(node[0].node.construct_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok()); @@ -9993,7 +10280,7 @@ mod tests { let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx()); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -10050,6 +10337,25 @@ mod tests { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("expected BroadcastChannelUpdate event"), } + + // If we provide a channel_id not associated with the peer, we should get an error and no updates + // should be applied to ensure update atomicity as specified in the API docs. + let bad_channel_id = [10; 32]; + let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths; + let new_fee = current_fee + 100; + assert!( + matches!( + nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate { + forwarding_fee_proportional_millionths: Some(new_fee), + ..Default::default() + }), + Err(APIError::ChannelUnavailable { err: _ }), + ) + ); + // Check that the fee hasn't changed for the channel that exists. + assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0); } }
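The final test added above checks that `update_partial_channel_config` is atomic: if any channel id in the batch is unknown, it returns an error and leaves every config untouched. A minimal sketch of that validate-then-apply pattern, using a hypothetical `ConfigStore` rather than the real API:

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
struct UpdateError(String);

struct ConfigStore { fee_ppm: HashMap<[u8; 32], u32> }

impl ConfigStore {
    /// Updates the forwarding fee for every listed channel, or for none of them.
    fn update_fee_ppm(&mut self, channel_ids: &[[u8; 32]], new_fee: u32) -> Result<(), UpdateError> {
        // Validate the whole batch up front...
        for id in channel_ids {
            if !self.fee_ppm.contains_key(id) {
                return Err(UpdateError(format!("no such channel {:x?}", &id[..4])));
            }
        }
        // ...and only then mutate, so a bad id can never leave a partially-applied batch.
        for id in channel_ids {
            *self.fee_ppm.get_mut(id).unwrap() = new_fee;
        }
        Ok(())
    }
}

fn main() {
    let good_chan = [1u8; 32];
    let bad_chan = [9u8; 32];
    let mut store = ConfigStore { fee_ppm: HashMap::new() };
    store.fee_ppm.insert(good_chan, 1_000);
    assert!(store.update_fee_ppm(&[good_chan, bad_chan], 1_100).is_err());
    assert_eq!(store.fee_ppm[&good_chan], 1_000); // untouched after the failed batch
    assert!(store.update_fee_ppm(&[good_chan], 1_100).is_ok());
    assert_eq!(store.fee_ppm[&good_chan], 1_100);
}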