X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=c7fc70a4593d66601b7d0d0e2acf8759196e5ffd;hb=c383f06538ac664fe3312daf765595ba106d5b98;hp=398975c6565c1968778b4de5948e1109c301f886;hpb=9ce7e8e650282d961af10a72a84dfdb587436827;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 398975c6..c7fc70a4 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -40,12 +40,12 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
-use crate::ln::features::InvoiceFeatures;
+use crate::ln::features::Bolt11InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
+use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
 use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
@@ -53,7 +53,7 @@ use crate::ln::onion_utils::HTLCFailReason;
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 #[cfg(test)]
 use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
 use crate::ln::wire::Encode;
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
@@ -530,6 +530,13 @@ enum BackgroundEvent {
 		funding_txo: OutPoint,
 		update: ChannelMonitorUpdate
 	},
+	/// Some [`ChannelMonitorUpdate`]s completed before we were serialized but we still have
+	/// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`]s pending
+	/// on a channel.
+	MonitorUpdatesComplete {
+		counterparty_node_id: PublicKey,
+		channel_id: [u8; 32],
+	},
 }

 #[derive(Debug)]
@@ -678,7 +685,7 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
 		&& self.in_flight_monitor_updates.is_empty()
 	}

-	// Returns a count of all channels we have with this peer, including pending channels.
+	// Returns a count of all channels we have with this peer, including unfunded channels.
 	fn total_channel_count(&self) -> usize {
 		self.channel_by_id.len() +
 			self.outbound_v1_channel_by_id.len() +
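The `MonitorUpdatesComplete` background event added above carries only the peer and channel id: everything else needed on replay already sits in the peer's `monitor_update_blocked_actions` map. A minimal sketch of that replay flow, using simplified stand-in types rather than LDK's actual definitions:

```rust
use std::collections::HashMap;

type ChannelId = [u8; 32];

// Stand-in for the diff's `BackgroundEvent` (real variant also carries the peer's node id).
enum BackgroundEvent {
    MonitorUpdatesComplete { channel_id: ChannelId },
}

fn process_background_events(
    events: Vec<BackgroundEvent>,
    monitor_update_blocked_actions: &mut HashMap<ChannelId, Vec<String>>,
) {
    for event in events {
        match event {
            BackgroundEvent::MonitorUpdatesComplete { channel_id } => {
                // The monitor update itself completed before serialization; only the
                // queued completion actions remain to be run for the channel.
                for action in monitor_update_blocked_actions.remove(&channel_id).unwrap_or_default() {
                    println!("running completion action: {action}");
                }
            }
        }
    }
}
```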
@@ -752,7 +759,23 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
 /// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
+	ChannelManager<
+		&'a M,
+		&'b T,
+		&'c KeysManager,
+		&'c KeysManager,
+		&'c KeysManager,
+		&'d F,
+		&'e DefaultRouter<
+			&'f NetworkGraph<&'g L>,
+			&'g L,
+			&'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
+			ProbabilisticScoringFeeParameters,
+			ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
+		>,
+		&'g L
+	>;

 macro_rules! define_test_pub_trait { ($vis: vis) => {
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
@@ -1463,6 +1486,9 @@ pub struct ChannelDetails {
 	///
 	/// [`confirmations_required`]: ChannelDetails::confirmations_required
 	pub is_channel_ready: bool,
+	/// The stage of the channel's shutdown.
+	/// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116.
+	pub channel_shutdown_state: Option<ChannelShutdownState>,
 	/// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
 	/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
 	///
@@ -1502,10 +1528,13 @@ impl ChannelDetails {
 		self.short_channel_id.or(self.outbound_scid_alias)
 	}

-	fn from_channel_context<Signer: WriteableEcdsaChannelSigner>(context: &ChannelContext<Signer>,
-		best_block_height: u32, latest_features: InitFeatures) -> Self {
-
-		let balance = context.get_available_balances();
+	fn from_channel_context<Signer: WriteableEcdsaChannelSigner, F: Deref>(
+		context: &ChannelContext<Signer>, best_block_height: u32, latest_features: InitFeatures,
+		fee_estimator: &LowerBoundedFeeEstimator<F>
+	) -> Self
+	where F::Target: FeeEstimator
+	{
+		let balance = context.get_available_balances(fee_estimator);
 		let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
 			context.get_holder_counterparty_selected_channel_reserve_satoshis();
 		ChannelDetails {
@@ -1550,10 +1579,33 @@ impl ChannelDetails {
 			inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()),
 			inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
 			config: Some(context.config()),
+			channel_shutdown_state: Some(context.shutdown_state()),
 		}
 	}
 }

+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+/// Further information on the details of the channel shutdown.
+/// Upon channels being forced closed (i.e. commitment transaction confirmation detected
+/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or
+/// the channel will be removed shortly.
+/// Also note that in normal operation, peers could disconnect at any of these states
+/// and require peer re-connection before making progress onto other states.
+pub enum ChannelShutdownState {
+	/// Channel has not sent or received a shutdown message.
+	NotShuttingDown,
+	/// Local node has sent a shutdown message for this channel.
+	ShutdownInitiated,
+	/// Shutdown message exchanges have concluded and the channels are in the midst of
+	/// resolving all existing open HTLCs before closing can continue.
+	ResolvingHTLCs,
+	/// All HTLCs have been resolved, nodes are currently negotiating channel close onchain fee rates.
+	NegotiatingClosingFee,
+	/// We've successfully negotiated a closing_signed dance. At this point `ChannelManager` is about
+	/// to drop the channel.
+ ShutdownComplete, +} + /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments. /// These include payments that have yet to find a successful path, or have unresolved HTLCs. #[derive(Debug, PartialEq)] @@ -1697,12 +1749,12 @@ macro_rules! convert_chan_err { }, } }; - ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => { + ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => { match $err { - // We should only ever have `ChannelError::Close` when prefunded channels error. + // We should only ever have `ChannelError::Close` when unfunded channels error. // In any case, just close the channel. ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => { - log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, &$channel_context); let shutdown_res = $channel_context.force_shutdown(false); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(), @@ -1732,7 +1784,7 @@ macro_rules! try_v1_outbound_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED); if drop { $entry.remove_entry(); } @@ -1975,6 +2027,8 @@ macro_rules! process_events_body { let mut pending_events = $self.pending_events.lock().unwrap(); pending_events.drain(..num_events); processed_all_events = pending_events.is_empty(); + // Note that `push_pending_forwards_ev` relies on `pending_events_processor` being + // updated here with the `pending_events` lock acquired. $self.pending_events_processor.store(false, Ordering::Release); } @@ -2004,6 +2058,8 @@ where { /// Constructs a new `ChannelManager` to hold several channels and route between them. /// + /// The current time or latest block header time can be provided as the `current_timestamp`. + /// /// This is the main "logic hub" for all channel-related actions, and implements /// [`ChannelMessageHandler`]. 
/// @@ -2017,7 +2073,11 @@ where /// [`block_connected`]: chain::Listen::block_connected /// [`block_disconnected`]: chain::Listen::block_disconnected /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash - pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self { + pub fn new( + fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, + node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters, + current_timestamp: u32, + ) -> Self { let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material(); @@ -2049,7 +2109,7 @@ where probing_cookie_secret: entropy_source.get_secure_random_bytes(), - highest_seen_timestamp: AtomicUsize::new(0), + highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize), per_peer_state: FairRwLock::new(HashMap::new()), @@ -2186,9 +2246,10 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + // Only `Channels` in the channel_by_id map can be considered funded. for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, - peer_state.latest_features.clone()); + peer_state.latest_features.clone(), &self.fee_estimator); res.push(details); } } @@ -2214,17 +2275,17 @@ where let peer_state = &mut *peer_state_lock; for (_channel_id, channel) in peer_state.channel_by_id.iter() { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, - peer_state.latest_features.clone()); + peer_state.latest_features.clone(), &self.fee_estimator); res.push(details); } for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, - peer_state.latest_features.clone()); + peer_state.latest_features.clone(), &self.fee_estimator); res.push(details); } for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, - peer_state.latest_features.clone()); + peer_state.latest_features.clone(), &self.fee_estimator); res.push(details); } } @@ -2254,10 +2315,15 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let features = &peer_state.latest_features; + let chan_context_to_details = |context| { + ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator) + }; return peer_state.channel_by_id .iter() - .map(|(_, channel)| - ChannelDetails::from_channel_context(&channel.context, best_block_height, features.clone())) + .map(|(_, channel)| &channel.context) + .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .map(chan_context_to_details) .collect(); } vec![] @@ -2314,48 +2380,58 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let per_peer_state = self.per_peer_state.read().unwrap(); + { + let per_peer_state = self.per_peer_state.read().unwrap(); - 
let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - let funding_txo_opt = chan_entry.get().context.get_funding_txo(); - let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() - .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; - failed_htlcs = htlcs; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; - // We can send the `shutdown` message before updating the `ChannelMonitor` - // here as we don't need the monitor update to complete until we send a - // `shutdown_signed`, which we'll delay if we're pending a monitor update. - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg: shutdown_msg, - }); + match peer_state.channel_by_id.entry(channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); + let their_features = &peer_state.latest_features; + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() + .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; + failed_htlcs = htlcs; - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update_opt.take() { - break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); - } + // We can send the `shutdown` message before updating the `ChannelMonitor` + // here as we don't need the monitor update to complete until we send a + // `shutdown_signed`, which we'll delay if we're pending a monitor update. + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg: shutdown_msg, + }); - if chan_entry.get().is_shutdown() { - let channel = remove_channel!(self, chan_entry); - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); + // Update the monitor with the shutdown script if necessary. 
+				if let Some(monitor_update) = monitor_update_opt.take() {
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+				}
+
+				if chan_entry.get().is_shutdown() {
+					let channel = remove_channel!(self, chan_entry);
+					if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+							msg: channel_update
+						});
+					}
+					self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+				}
+				break Ok(());
+			},
+			hash_map::Entry::Vacant(_) => (),
+			}
+		}
+		// If we reach this point, it means that the channel_id either refers to an unfunded channel or
+		// it does not exist for this peer. Either way, we can attempt to force-close it.
+		//
+		// An appropriate error will be returned for non-existence of the channel if that's the case.
+		return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+		// TODO(dunxen): This is still not ideal as we're doing some extra lookups.
+		// Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
 	};

 	for htlc_source in failed_htlcs.drain(..) {
@@ -2474,14 +2550,14 @@ where
 			self.issue_channel_close_events(&chan.get().context, closure_reason);
 			let mut chan = remove_channel!(self, chan);
 			self.finish_force_close_channel(chan.context.force_shutdown(false));
-			// Prefunded channel has no update
+			// Unfunded channel has no update
 			(None, chan.context.get_counterparty_node_id())
 		} else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
 			log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
 			self.issue_channel_close_events(&chan.get().context, closure_reason);
 			let mut chan = remove_channel!(self, chan);
 			self.finish_force_close_channel(chan.context.force_shutdown(false));
-			// Prefunded channel has no update
+			// Unfunded channel has no update
 			(None, chan.context.get_counterparty_node_id())
 		} else {
 			return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
@@ -3021,10 +3097,17 @@ where
 	#[cfg(test)]
 	pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
 		let _lck = self.total_consistency_lock.read().unwrap();
-		self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+		self.send_payment_along_path(SendAlongPathArgs {
+			path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
+			session_priv_bytes
+		})
 	}
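The hunk above (and the signature change just below) replaces an eight-argument callback with a single `SendAlongPathArgs` value, so every `|path, payment_hash, …|` closure in the senders collapses to `|args| self.send_payment_along_path(args)`. A sketch of this parameter-object pattern with hypothetical, simplified field types (LDK's real struct lives in `ln::outbound_payment`):

```rust
// Parameter-object sketch; field names follow the diff, types are stand-ins.
struct SendAlongPathArgs {
    total_value: u64,
    cur_height: u32,
    session_priv_bytes: [u8; 32],
}

fn send_payment_along_path(args: SendAlongPathArgs) -> Result<(), ()> {
    // Destructure once at the top, exactly as the patched method does.
    let SendAlongPathArgs { total_value, cur_height, session_priv_bytes } = args;
    let _ = (total_value, cur_height, session_priv_bytes);
    Ok(())
}

fn main() {
    // Call sites shrink to a one-argument closure that simply forwards `args`,
    // and adding a field later no longer touches every closure's signature.
    let send = |args: SendAlongPathArgs| send_payment_along_path(args);
    send(SendAlongPathArgs { total_value: 1_000, cur_height: 800_000, session_priv_bytes: [0; 32] })
        .expect("sketch only");
}
```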
-	fn send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+	fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
+		let SendAlongPathArgs {
+			path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
+			session_priv_bytes
+		} = args;
 		// The top-level caller should hold the total_consistency_lock read lock.
 		debug_assert!(self.total_consistency_lock.try_write().is_err());
@@ -3061,7 +3144,7 @@ where
 						session_priv: session_priv.clone(),
 						first_hop_htlc_msat: htlc_msat,
 						payment_id,
-					}, onion_packet, None, &self.logger);
+					}, onion_packet, None, &self.fee_estimator, &self.logger);
 				match break_chan_entry!(self, send_res, chan) {
 					Some(monitor_update) => {
 						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
@@ -3144,6 +3227,7 @@ where
 	/// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
 	/// different route unless you intend to pay twice!
 	///
+	/// [`RouteHop`]: crate::routing::router::RouteHop
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
 	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
 	/// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
@@ -3153,9 +3237,9 @@ where
 		let best_block_height = self.best_block.read().unwrap().height();
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		self.pending_outbound_payments
-			.send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
-				|path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
-				self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+			.send_payment_with_route(route, payment_hash, recipient_onion, payment_id,
+				&self.entropy_source, &self.node_signer, best_block_height,
+				|args| self.send_payment_along_path(args))
 	}

 	/// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
@@ -3167,18 +3251,16 @@ where
 			.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
 				&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
 				&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
-				&self.pending_events,
-				|path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
-				self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+				&self.pending_events, |args| self.send_payment_along_path(args))
 	}

 	#[cfg(test)]
 	pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-		self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
-			|path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
-			self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, + keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, + best_block_height, |args| self.send_payment_along_path(args)) } #[cfg(test)] @@ -3232,9 +3314,7 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.send_spontaneous_payment_with_route( route, payment_preimage, recipient_onion, payment_id, &self.entropy_source, - &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.node_signer, best_block_height, |args| self.send_payment_along_path(args)) } /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route @@ -3250,9 +3330,7 @@ where self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion, payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.logger, &self.pending_events, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + &self.logger, &self.pending_events, |args| self.send_payment_along_path(args)) } /// Send a payment that is probing the given route for liquidity. We calculate the @@ -3261,9 +3339,9 @@ where pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, + &self.entropy_source, &self.node_signer, best_block_height, + |args| self.send_payment_along_path(args)) } /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a @@ -3288,7 +3366,7 @@ where Some(chan) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; - let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) + let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); let user_id = chan.context.get_user_id(); @@ -3461,27 +3539,48 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for channel_id in channel_ids { - if !peer_state.channel_by_id.contains_key(channel_id) { + if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id), 
}); - } + }; } for channel_id in channel_ids { - let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - let mut config = channel.context.config(); - config.apply(config_update); - if !channel.context.update_config(&config) { + if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) { + let mut config = channel.context.config(); + config.apply(config_update); + if !channel.context.update_config(&config) { + continue; + } + if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.context.get_counterparty_node_id(), + msg, + }); + } continue; } - if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); - } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.context.get_counterparty_node_id(), - msg, + + let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else { + // This should not be reachable as we've already checked for non-existence in the previous channel_id loop. + debug_assert!(false); + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!", + log_bytes!(*channel_id), counterparty_node_id), }); - } + }; + let mut config = context.config(); + config.apply(config_update); + // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready` + // which would be the case for pending inbound/outbound channels. + context.update_config(&config); } Ok(()) } @@ -3778,7 +3877,8 @@ where }); if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), - onion_packet, skimmed_fee_msat, &self.logger) + onion_packet, skimmed_fee_msat, &self.fee_estimator, + &self.logger) { if let ChannelError::Ignore(msg) = e { log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); @@ -4067,9 +4167,7 @@ where let best_block_height = self.best_block.read().unwrap().height(); self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.pending_events, &self.logger, - |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv| - self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + &self.pending_events, &self.logger, |args| self.send_payment_along_path(args)); for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) 
{
 		self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
 	}
@@ -4138,6 +4236,22 @@ where
 				}
 				let _ = handle_error!(self, res, counterparty_node_id);
 			},
+			BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
+				let per_peer_state = self.per_peer_state.read().unwrap();
+				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+					let peer_state = &mut *peer_state_lock;
+					if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+						handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+					} else {
+						let update_actions = peer_state.monitor_update_blocked_actions
+							.remove(&channel_id).unwrap_or(Vec::new());
+						mem::drop(peer_state_lock);
+						mem::drop(per_peer_state);
+						self.handle_monitor_update_completion_actions(update_actions);
+					}
+				}
+			},
 		}
 	}
 	NotifyOption::DoPersist
@@ -4166,7 +4280,7 @@ where
 		log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
 			log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);

-		chan.queue_update_fee(new_feerate, &self.logger);
+		chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
 		NotifyOption::DoPersist
 	}

@@ -4179,13 +4293,19 @@ where
 		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
 			let mut should_persist = self.process_background_events();

-			let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+			let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+			let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);

 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
 				for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
+					let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+						min_mempool_feerate
+					} else {
+						normal_feerate
+					};
 					let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
 					if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
 				}
@@ -4205,6 +4325,7 @@ where
 	/// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
 	///   with the current [`ChannelConfig`].
 	/// * Removing peers which have disconnected but no longer have any channels.
+	/// * Force-closing and removing channels which have not completed establishment in a timely manner.
 	///
 	/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
 	/// estimate fetches.
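The two hunks below stop applying a single `Normal` feerate to every channel: anchor-output channels can attach fees via CPFP when the commitment transaction is actually broadcast, so their pre-signed commitment only needs to clear the mempool minimum. A hedged sketch of that selection rule (simplified signature, not LDK's API):

```rust
#[derive(Clone, Copy)]
enum ConfirmationTarget { MempoolMinimum, Normal }

/// Pick the feerate target for a channel's commitment transaction.
fn commitment_feerate(
    supports_anchors_zero_fee_htlc_tx: bool,
    bounded_sat_per_1000_weight: impl Fn(ConfirmationTarget) -> u32,
) -> u32 {
    if supports_anchors_zero_fee_htlc_tx {
        // Anchor channels can be fee-bumped at broadcast time, so the
        // commitment only has to be accepted into the mempool.
        bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum)
    } else {
        // Non-anchor channels must confirm on their own at a normal rate.
        bounded_sat_per_1000_weight(ConfirmationTarget::Normal)
    }
}
```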
@@ -4215,7 +4336,8 @@ where
 		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
 			let mut should_persist = self.process_background_events();

-			let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+			let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+			let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);

 			let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
 			let mut timed_out_mpp_htlcs = Vec::new();
@@ -4228,6 +4350,11 @@ where
 				let pending_msg_events = &mut peer_state.pending_msg_events;
 				let counterparty_node_id = *counterparty_node_id;
 				peer_state.channel_by_id.retain(|chan_id, chan| {
+					let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+						min_mempool_feerate
+					} else {
+						normal_feerate
+					};
 					let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
 					if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
@@ -4293,6 +4420,26 @@ where
 					true
 				});
+
+				let process_unfunded_channel_tick = |
+					chan_id: &[u8; 32],
+					chan_context: &mut ChannelContext<<SP::Target as SignerProvider>::Signer>,
+					unfunded_chan_context: &mut UnfundedChannelContext,
+				| {
+					chan_context.maybe_expire_prev_config();
+					if unfunded_chan_context.should_expire_unfunded_channel() {
+						log_error!(self.logger, "Force-closing pending outbound channel {} for not establishing in a timely manner", log_bytes!(&chan_id[..]));
+						update_maps_on_chan_removal!(self, &chan_context);
+						self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
+						self.finish_force_close_channel(chan_context.force_shutdown(false));
+						false
+					} else {
+						true
+					}
+				};
+				peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+				peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context));
+
 				if peer_state.ok_to_remove(true) {
 					pending_peers_awaiting_removal.push(counterparty_node_id);
 				}
@@ -4948,24 +5095,29 @@ where
 			if peer_state_mutex_opt.is_none() { return }
 			peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			let mut channel = {
-				match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
-					hash_map::Entry::Occupied(chan) => chan,
-					hash_map::Entry::Vacant(_) => return,
-				}
-			};
+			let channel =
+				if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+					chan
+				} else {
+					let update_actions = peer_state.monitor_update_blocked_actions
+						.remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+					mem::drop(peer_state_lock);
+					mem::drop(per_peer_state);
+					self.handle_monitor_update_completion_actions(update_actions);
+					return;
+				};
 			let remaining_in_flight =
 				if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
 					pending.retain(|upd| upd.update_id > highest_applied_update_id);
 					pending.len()
 				} else { 0 };
 			log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}.
{} pending in-flight updates.", - highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(), + highest_applied_update_id, channel.context.get_latest_monitor_update_id(), remaining_in_flight); - if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id { + if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id { return; } - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel); } /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`]. @@ -5398,38 +5550,50 @@ where })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - - if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", - log_bytes!(msg.channel_id), - if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); - } + // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per + // https://github.com/lightningdevkit/rust-lightning/issues/2422 + if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) { + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); + let mut chan = remove_channel!(self, chan_entry); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + return Ok(()); + } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) { + log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..])); + self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); + let mut chan = remove_channel!(self, chan_entry); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + return Ok(()); + } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) { + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } - let funding_txo_opt = chan_entry.get().context.get_funding_txo(); - let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, - chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); - dropped_htlcs = htlcs; + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); + let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, + chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); + dropped_htlcs = htlcs; - if let Some(msg) = shutdown { - // We can send the `shutdown` message before updating the `ChannelMonitor` - // 
here as we don't need the monitor update to complete until we send a - // `shutdown_signed`, which we'll delay if we're pending a monitor update. - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg, - }); - } + if let Some(msg) = shutdown { + // We can send the `shutdown` message before updating the `ChannelMonitor` + // here as we don't need the monitor update to complete until we send a + // `shutdown_signed`, which we'll delay if we're pending a monitor update. + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update_opt { - break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); - } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update_opt { + break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); + } + break Ok(()); + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; for htlc_source in dropped_htlcs.drain(..) { @@ -5539,7 +5703,7 @@ where _ => pending_forward_info } }; - try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan); + try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } @@ -5712,16 +5876,21 @@ where } } - // We only want to push a PendingHTLCsForwardable event if no others are queued. fn push_pending_forwards_ev(&self) { let mut pending_events = self.pending_events.lock().unwrap(); - let forward_ev_exists = pending_events.iter() - .find(|(ev, _)| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }) - .is_some(); - if !forward_ev_exists { - pending_events.push_back((events::Event::PendingHTLCsForwardable { - time_forwardable: - Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), + let is_processing_events = self.pending_events_processor.load(Ordering::Acquire); + let num_forward_events = pending_events.iter().filter(|(ev, _)| + if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false } + ).count(); + // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing + // events is done in batches and they are not removed until we're done processing each + // batch. Since handling a `PendingHTLCsForwardable` event will call back into the + // `ChannelManager`, we'll still see the original forwarding event not removed. 
Phantom + // payments will need an additional forwarding event before being claimed to make them look + // real by taking more time. + if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 { + pending_events.push_back((Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), }, None)); } } @@ -5756,7 +5925,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { let funding_txo = chan.get().context.get_funding_txo(); - let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); + let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan); let res = if let Some(monitor_update) = monitor_update_opt { handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan).map(|_| ()) @@ -6027,7 +6196,7 @@ where let counterparty_node_id = chan.context.get_counterparty_node_id(); let funding_txo = chan.context.get_funding_txo(); let (monitor_opt, holding_cell_failed_htlcs) = - chan.maybe_free_holding_cell_htlcs(&self.logger); + chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger); if !holding_cell_failed_htlcs.is_empty() { failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id)); } @@ -6839,13 +7008,13 @@ where provided_node_features(&self.default_configuration) } - /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by + /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] - pub fn invoice_features(&self) -> InvoiceFeatures { + pub fn invoice_features(&self) -> Bolt11InvoiceFeatures { provided_invoice_features(&self.default_configuration) } @@ -7125,37 +7294,20 @@ where log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); let per_peer_state = self.per_peer_state.read().unwrap(); - for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.retain(|_, chan| { - let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id { - if !chan.context.have_received_message() { - // If we created this (outbound) channel while we were disconnected from the - // peer we probably failed to send the open_channel message, which is now - // lost. We can't have had anything pending related to this channel, so we just - // drop it. 
-					false
-				} else {
-					pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-						node_id: chan.context.get_counterparty_node_id(),
-						msg: chan.get_channel_reestablish(&self.logger),
-					});
-					true
-				}
-			} else { true };
-			if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
-				if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
-					if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
-						pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
-							node_id: *counterparty_node_id,
-							msg, update_msg,
-						});
-					}
-				}
-			}
-			retain
+
+			// Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+			// (so won't be recovered after a crash) we don't need to bother closing unfunded channels and
+			// clearing their maps here. Instead we can just queue channel_reestablish messages for
+			// channels in the channel_by_id map.
+			peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+				pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+					node_id: chan.context.get_counterparty_node_id(),
+					msg: chan.get_channel_reestablish(&self.logger),
+				});
 			});
 		}
 		//TODO: Also re-broadcast announcement_signatures
@@ -7189,7 +7341,7 @@ where
 		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
-			if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+			if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
 				peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 					node_id: *counterparty_node_id,
 					msg,
@@ -7277,13 +7429,13 @@ pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
 	provided_init_features(config).to_context()
 }

-/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
 /// [`ChannelManager`].
 ///
 /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
 /// or not. Thus, this method is not public.
 #[cfg(any(feature = "_test_utils", test))]
-pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures {
+pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
 	provided_init_features(config).to_context()
 }

@@ -7375,6 +7527,7 @@ impl Writeable for ChannelDetails {
 			(35, self.inbound_htlc_maximum_msat, option),
 			(37, user_channel_id_high_opt, option),
 			(39, self.feerate_sat_per_1000_weight, option),
+			(41, self.channel_shutdown_state, option),
 		});
 		Ok(())
 	}
@@ -7412,6 +7565,7 @@ impl Readable for ChannelDetails {
 			(35, inbound_htlc_maximum_msat, option),
 			(37, user_channel_id_high_opt, option),
 			(39, feerate_sat_per_1000_weight, option),
+			(41, channel_shutdown_state, option),
 		});

 		// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -7447,12 +7601,13 @@ impl Readable for ChannelDetails {
 			inbound_htlc_minimum_msat,
 			inbound_htlc_maximum_msat,
 			feerate_sat_per_1000_weight,
+			channel_shutdown_state,
 		})
 	}
 }

 impl_writeable_tlv_based!(PhantomRouteHints, {
-	(2, channels, vec_type),
+	(2, channels, required_vec),
 	(4, phantom_scid, required),
 	(6, real_node_pubkey, required),
 });
@@ -7644,7 +7799,7 @@ impl Readable for HTLCSource {
 			0 => {
 				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
 				let mut first_hop_htlc_msat: u64 = 0;
-				let mut path_hops: Option<Vec<RouteHop>> = Some(Vec::new());
+				let mut path_hops = Vec::new();
 				let mut payment_id = None;
 				let mut payment_params: Option<PaymentParameters> = None;
 				let mut blinded_tail: Option<BlindedTail> = None;
@@ -7652,7 +7807,7 @@
 					(0, session_priv, required),
 					(1, payment_id, option),
 					(2, first_hop_htlc_msat, required),
-					(4, path_hops, vec_type),
+					(4, path_hops, required_vec),
 					(5, payment_params, (option: ReadableArgs, 0)),
 					(6, blinded_tail, option),
 				});
@@ -7661,7 +7816,7 @@
 					// instead.
 					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
 				}
-				let path = Path { hops: path_hops.ok_or(DecodeError::InvalidValue)?, blinded_tail };
+				let path = Path { hops: path_hops, blinded_tail };
 				if path.hops.len() == 0 {
 					return Err(DecodeError::InvalidValue);
 				}
@@ -7696,7 +7851,7 @@ impl Writeable for HTLCSource {
 				(1, payment_id_opt, option),
 				(2, first_hop_htlc_msat, required),
 				// 3 was previously used to write a PaymentSecret for the payment.
-				(4, path.hops, vec_type),
+				(4, path.hops, required_vec),
 				(5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
 				(6, path.blinded_tail, option),
 			});
@@ -7946,7 +8101,7 @@ where
 			(6, monitor_update_blocked_actions_per_peer, option),
 			(7, self.fake_scid_rand_bytes, required),
 			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
-			(9, htlc_purposes, vec_type),
+			(9, htlc_purposes, required_vec),
 			(10, in_flight_monitor_updates, option),
 			(11, self.probing_cookie_secret, required),
 			(13, htlc_onion_fields, optional_vec),
@@ -7997,6 +8152,14 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
 	}
 }

+impl_writeable_tlv_based_enum!(ChannelShutdownState,
+	(0, NotShuttingDown) => {},
+	(2, ShutdownInitiated) => {},
+	(4, ResolvingHTLCs) => {},
+	(6, NegotiatingClosingFee) => {},
+	(8, ShutdownComplete) => {}, ;
+);
+
 /// Arguments for the creation of a ChannelManager that are not deserialized.
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
@@ -8385,7 +8548,7 @@ where
 			(6, monitor_update_blocked_actions_per_peer, option),
 			(7, fake_scid_rand_bytes, option),
 			(8, events_override, option),
-			(9, claimable_htlc_purposes, vec_type),
+			(9, claimable_htlc_purposes, optional_vec),
 			(10, in_flight_monitor_updates, option),
 			(11, probing_cookie_secret, option),
 			(13, claimable_htlc_onion_fields, optional_vec),
@@ -8449,6 +8612,16 @@ where
 						update: update.clone(),
 					});
 				}
+				if $chan_in_flight_upds.is_empty() {
+					// We had some updates to apply, but it turns out they had completed before we
+					// were serialized, we just weren't notified of that. Thus, we may have to run
+					// the completion actions for any monitor updates, but otherwise are done.
+ pending_background_events.push( + BackgroundEvent::MonitorUpdatesComplete { + counterparty_node_id: $counterparty_node_id, + channel_id: $funding_txo.to_channel_id(), + }); + } if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() { log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!"); return Err(DecodeError::InvalidValue); @@ -8841,6 +9014,12 @@ where blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates .entry(blocked_channel_outpoint.to_channel_id()) .or_insert_with(Vec::new).push(blocking_action.clone()); + } else { + // If the channel we were blocking has closed, we don't need to + // worry about it - the blocked monitor update should never have + // been released from the `Channel` object so it can't have + // completed, and if the channel closed there's no reason to bother + // anymore. } } } @@ -10058,6 +10237,25 @@ mod tests { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("expected BroadcastChannelUpdate event"), } + + // If we provide a channel_id not associated with the peer, we should get an error and no updates + // should be applied to ensure update atomicity as specified in the API docs. + let bad_channel_id = [10; 32]; + let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths; + let new_fee = current_fee + 100; + assert!( + matches!( + nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate { + forwarding_fee_proportional_millionths: Some(new_fee), + ..Default::default() + }), + Err(APIError::ChannelUnavailable { err: _ }), + ) + ); + // Check that the fee hasn't changed for the channel that exists. + assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0); } } @@ -10073,7 +10271,7 @@ pub mod bench { use crate::routing::gossip::NetworkGraph; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::util::test_utils; - use crate::util::config::UserConfig; + use crate::util::config::{UserConfig, MaxDustHTLCExposure}; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; @@ -10111,6 +10309,7 @@ pub mod bench { // Note that this is unrealistic as each payment send will require at least two fsync // calls per node. 
let network = bitcoin::Network::Testnet; + let genesis_block = bitcoin::blockdata::constants::genesis_block(network); let tx_broadcaster = test_utils::TestBroadcaster::new(network); let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; @@ -10119,6 +10318,7 @@ pub mod bench { let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer); let mut config: UserConfig = Default::default(); + config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253); config.channel_handshake_config.minimum_depth = 1; let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a); @@ -10127,7 +10327,7 @@ pub mod bench { let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters { network, best_block: BestBlock::from_network(network), - }); + }, genesis_block.header.time); let node_a_holder = ANodeHolder { node: &node_a }; let logger_b = test_utils::TestLogger::with_id("node a".to_owned()); @@ -10137,7 +10337,7 @@ pub mod bench { let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters { network, best_block: BestBlock::from_network(network), - }); + }, genesis_block.header.time); let node_b_holder = ANodeHolder { node: &node_b }; node_a.peer_connected(&node_b.get_our_node_id(), &Init {
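For reference, `ChannelManager::new` now takes a `current_timestamp: u32` used to seed `highest_seen_timestamp`; the bench above passes `genesis_block.header.time`. A non-bench caller might derive it from the system clock instead, as in this hypothetical helper (not part of LDK):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Any reasonable u32 UNIX timestamp works here, e.g. the latest block
// header's time, since it only seeds `highest_seen_timestamp`.
fn current_timestamp() -> u32 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before 1970")
        .as_secs() as u32
}
```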