Refer to channels busy with funding tx negotiation as "unfunded"
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b87be9cbb75cca9bc838592145ca924eee3d6a7a..111d79ae3c2df5a39a93120bec2c22fd75b2515b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -43,9 +43,9 @@ use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
-use crate::ln::features::InvoiceFeatures;
+use crate::ln::features::Bolt11InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
+use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
 use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
@@ -530,6 +530,13 @@ enum BackgroundEvent {
                funding_txo: OutPoint,
                update: ChannelMonitorUpdate
        },
+       /// Some [`ChannelMonitorUpdate`]s completed before we were serialized, but we still have
+       /// them marked as pending. Thus, we need to run any [`MonitorUpdateCompletionAction`]s
+       /// still pending on a channel.
+       MonitorUpdatesComplete {
+               counterparty_node_id: PublicKey,
+               channel_id: [u8; 32],
+       },
 }
 
 #[derive(Debug)]
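Note on the new `MonitorUpdatesComplete` variant: the handler added later in this patch (in the process_background_events hunk below) consumes it roughly as follows. This is a condensed sketch of code that appears further down in this same diff, with the lock acquisition and surrounding match arms trimmed:

    BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
        // If the channel is still around, drive the normal post-monitor-update path;
        // otherwise just run whatever completion actions were left blocked on the
        // (now closed) channel.
        if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
            handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
        } else {
            let update_actions = peer_state.monitor_update_blocked_actions
                .remove(&channel_id).unwrap_or(Vec::new());
            // Both locks must be dropped before the actions are run.
            mem::drop(peer_state_lock);
            mem::drop(per_peer_state);
            self.handle_monitor_update_completion_actions(update_actions);
        }
    },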
@@ -678,7 +685,7 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
                        && self.in_flight_monitor_updates.is_empty()
        }
 
-       // Returns a count of all channels we have with this peer, including pending channels.
+       // Returns a count of all channels we have with this peer, including unfunded channels.
        fn total_channel_count(&self) -> usize {
                self.channel_by_id.len() +
                        self.outbound_v1_channel_by_id.len() +
@@ -1742,12 +1749,12 @@ macro_rules! convert_chan_err {
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => {
+       ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
                match $err {
-                       // We should only ever have `ChannelError::Close` when prefunded channels error.
+                       // We should only ever have `ChannelError::Close` when unfunded channels error.
                        // In any case, just close the channel.
                        ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+                               log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
                                update_maps_on_chan_removal!($self, &$channel_context);
                                let shutdown_res = $channel_context.force_shutdown(false);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
@@ -1777,7 +1784,7 @@ macro_rules! try_v1_outbound_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED);
+                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -2528,14 +2535,14 @@ where
                                self.issue_channel_close_events(&chan.get().context, closure_reason);
                                let mut chan = remove_channel!(self, chan);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Prefunded channel has no update
+                               // Unfunded channel has no update
                                (None, chan.context.get_counterparty_node_id())
                        } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
                                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                                self.issue_channel_close_events(&chan.get().context, closure_reason);
                                let mut chan = remove_channel!(self, chan);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Prefunded channel has no update
+                               // Unfunded channel has no update
                                (None, chan.context.get_counterparty_node_id())
                        } else {
                                return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
@@ -3198,6 +3205,7 @@ where
        /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
        /// different route unless you intend to pay twice!
        ///
+       /// [`RouteHop`]: crate::routing::router::RouteHop
        /// [`Event::PaymentSent`]: events::Event::PaymentSent
        /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
        /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
@@ -4193,6 +4201,22 @@ where
                                        }
                                        let _ = handle_error!(self, res, counterparty_node_id);
                                },
+                               BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
+                                       let per_peer_state = self.per_peer_state.read().unwrap();
+                                       if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                               let peer_state = &mut *peer_state_lock;
+                                               if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+                                                       handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+                                               } else {
+                                                       let update_actions = peer_state.monitor_update_blocked_actions
+                                                               .remove(&channel_id).unwrap_or(Vec::new());
+                                                       mem::drop(peer_state_lock);
+                                                       mem::drop(per_peer_state);
+                                                       self.handle_monitor_update_completion_actions(update_actions);
+                                               }
+                                       }
+                               },
                        }
                }
                NotifyOption::DoPersist
@@ -4234,13 +4258,19 @@ where
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = self.process_background_events();
 
-                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
 
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
+                                       let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                                               min_mempool_feerate
+                                       } else {
+                                               normal_feerate
+                                       };
                                        let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                                }
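Both feerate-updating paths in this patch (the hunk above and the retain loop in the next hunk) now choose the feerate per channel rather than once per call. Pulled out of the diff context for readability, the selection is the following; the comment on why the mempool floor is acceptable for anchor channels is an editorial gloss on the anchor-output fee-bumping model, not text from the patch:

    // Computed once per timer tick, as above:
    let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
    let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);

    // Then, per channel: commitment transactions on anchor channels with zero-fee
    // HTLC transactions only need to propagate, since their fee can be bumped
    // after the fact via the anchor outputs, so the mempool-minimum estimate is
    // sufficient; all other channels keep the Normal confirmation target.
    let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
        min_mempool_feerate
    } else {
        normal_feerate
    };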
@@ -4270,7 +4300,8 @@ where
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = self.process_background_events();
 
-                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
 
                        let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
@@ -4283,6 +4314,11 @@ where
                                        let pending_msg_events = &mut peer_state.pending_msg_events;
                                        let counterparty_node_id = *counterparty_node_id;
                                        peer_state.channel_by_id.retain(|chan_id, chan| {
+                                               let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                                                       min_mempool_feerate
+                                               } else {
+                                                       normal_feerate
+                                               };
                                                let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
                                                if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
 
@@ -5003,24 +5039,29 @@ where
                if peer_state_mutex_opt.is_none() { return }
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let mut channel = {
-                       match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
-                               hash_map::Entry::Occupied(chan) => chan,
-                               hash_map::Entry::Vacant(_) => return,
-                       }
-               };
+               let channel =
+                       if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+                               chan
+                       } else {
+                               let update_actions = peer_state.monitor_update_blocked_actions
+                                       .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+                               mem::drop(peer_state_lock);
+                               mem::drop(per_peer_state);
+                               self.handle_monitor_update_completion_actions(update_actions);
+                               return;
+                       };
                let remaining_in_flight =
                        if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
                                pending.retain(|upd| upd.update_id > highest_applied_update_id);
                                pending.len()
                        } else { 0 };
                log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
-                       highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(),
+                       highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
                        remaining_in_flight);
-               if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
+               if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
                        return;
                }
-               handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
+               handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel);
        }
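One detail worth noting in the rework above: when the channel is already gone, the blocked completion actions are drained and both locks are explicitly dropped before the actions run. Condensed from the hunk above; the comment on why the ordering matters is an editorial note, not from the patch:

    let update_actions = peer_state.monitor_update_blocked_actions
        .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
    // Release the per-peer lock and the per_peer_state read lock first: the
    // completion actions may claim payments or surface events, which can
    // require re-taking these locks.
    mem::drop(peer_state_lock);
    mem::drop(per_peer_state);
    self.handle_monitor_update_completion_actions(update_actions);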
 
        /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
@@ -6899,13 +6940,13 @@ where
                provided_node_features(&self.default_configuration)
        }
 
-       /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+       /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
        /// [`ChannelManager`].
        ///
        /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
        /// or not. Thus, this method is not public.
        #[cfg(any(feature = "_test_utils", test))]
-       pub fn invoice_features(&self) -> InvoiceFeatures {
+       pub fn invoice_features(&self) -> Bolt11InvoiceFeatures {
                provided_invoice_features(&self.default_configuration)
        }
 
@@ -7249,7 +7290,7 @@ where
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
-                                       if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+                                       if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                                                        node_id: *counterparty_node_id,
                                                        msg,
@@ -7337,13 +7378,13 @@ pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
        provided_init_features(config).to_context()
 }
 
-/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
 /// [`ChannelManager`].
 ///
 /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
 /// or not. Thus, this method is not public.
 #[cfg(any(feature = "_test_utils", test))]
-pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures {
+pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
        provided_init_features(config).to_context()
 }
 
@@ -7515,7 +7556,7 @@ impl Readable for ChannelDetails {
 }
 
 impl_writeable_tlv_based!(PhantomRouteHints, {
-       (2, channels, vec_type),
+       (2, channels, required_vec),
        (4, phantom_scid, required),
        (6, real_node_pubkey, required),
 });
@@ -7707,7 +7748,7 @@ impl Readable for HTLCSource {
                        0 => {
                                let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
                                let mut first_hop_htlc_msat: u64 = 0;
-                               let mut path_hops: Option<Vec<RouteHop>> = Some(Vec::new());
+                               let mut path_hops = Vec::new();
                                let mut payment_id = None;
                                let mut payment_params: Option<PaymentParameters> = None;
                                let mut blinded_tail: Option<BlindedTail> = None;
@@ -7715,7 +7756,7 @@ impl Readable for HTLCSource {
                                        (0, session_priv, required),
                                        (1, payment_id, option),
                                        (2, first_hop_htlc_msat, required),
-                                       (4, path_hops, vec_type),
+                                       (4, path_hops, required_vec),
                                        (5, payment_params, (option: ReadableArgs, 0)),
                                        (6, blinded_tail, option),
                                });
@@ -7724,7 +7765,7 @@ impl Readable for HTLCSource {
                                        // instead.
                                        payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
                                }
-                               let path = Path { hops: path_hops.ok_or(DecodeError::InvalidValue)?, blinded_tail };
+                               let path = Path { hops: path_hops, blinded_tail };
                                if path.hops.len() == 0 {
                                        return Err(DecodeError::InvalidValue);
                                }
@@ -7759,7 +7800,7 @@ impl Writeable for HTLCSource {
                                        (1, payment_id_opt, option),
                                        (2, first_hop_htlc_msat, required),
                                        // 3 was previously used to write a PaymentSecret for the payment.
-                                       (4, path.hops, vec_type),
+                                       (4, path.hops, required_vec),
                                        (5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
                                        (6, path.blinded_tail, option),
                                 });
@@ -8009,7 +8050,7 @@ where
                        (6, monitor_update_blocked_actions_per_peer, option),
                        (7, self.fake_scid_rand_bytes, required),
                        (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
-                       (9, htlc_purposes, vec_type),
+                       (9, htlc_purposes, required_vec),
                        (10, in_flight_monitor_updates, option),
                        (11, self.probing_cookie_secret, required),
                        (13, htlc_onion_fields, optional_vec),
@@ -8456,7 +8497,7 @@ where
                        (6, monitor_update_blocked_actions_per_peer, option),
                        (7, fake_scid_rand_bytes, option),
                        (8, events_override, option),
-                       (9, claimable_htlc_purposes, vec_type),
+                       (9, claimable_htlc_purposes, optional_vec),
                        (10, in_flight_monitor_updates, option),
                        (11, probing_cookie_secret, option),
                        (13, claimable_htlc_onion_fields, optional_vec),
@@ -8520,6 +8561,16 @@ where
                                                        update: update.clone(),
                                                });
                                }
+                               if $chan_in_flight_upds.is_empty() {
+                                       // We had some updates to apply, but it turns out they had completed before we
+                                       // were serialized; we just weren't notified of that. Thus, we may have to run
+                                       // the completion actions for any monitor updates, but are otherwise done.
+                                       pending_background_events.push(
+                                               BackgroundEvent::MonitorUpdatesComplete {
+                                                       counterparty_node_id: $counterparty_node_id,
+                                                       channel_id: $funding_txo.to_channel_id(),
+                                               });
+                               }
                                if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
                                        log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
                                        return Err(DecodeError::InvalidValue);
@@ -8912,6 +8963,12 @@ where
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
                                                                        .entry(blocked_channel_outpoint.to_channel_id())
                                                                        .or_insert_with(Vec::new).push(blocking_action.clone());
+                                                       } else {
+                                                               // If the channel we were blocking has closed, we don't need to
+                                                               // worry about it - the blocked monitor update should never have
+                                                               // been released from the `Channel` object so it can't have
+                                                               // completed, and if the channel closed there's no reason to bother
+                                                               // anymore.
                                                        }
                                                }
                                        }