Merge pull request #2307 from benthecarman/verify-funcs
[rust-lightning] / lightning / src / ln / channelmanager.rs
index 5dacda7155f67f086be96a3aaa3d9a2b97cfad56..1fa5bd4d519f3bc49e566c7f055b4b46f1a04c0f 100644
@@ -752,7 +752,23 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
 /// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
+       ChannelManager<
+               &'a M,
+               &'b T,
+               &'c KeysManager,
+               &'c KeysManager,
+               &'c KeysManager,
+               &'d F,
+               &'e DefaultRouter<
+                       &'f NetworkGraph<&'g L>,
+                       &'g L,
+                       &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
+                       ProbabilisticScoringFeeParameters,
+                       ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
+               >,
+               &'g L
+       >;
 
 macro_rules! define_test_pub_trait { ($vis: vis) => {
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
@@ -1464,6 +1480,9 @@ pub struct ChannelDetails {
        ///
        /// [`confirmations_required`]: ChannelDetails::confirmations_required
        pub is_channel_ready: bool,
+       /// The stage of the channel's shutdown.
+       /// This will be `None` for `ChannelDetails` serialized by LDK versions prior to 0.0.116.
+       pub channel_shutdown_state: Option<ChannelShutdownState>,
        /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
        /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
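
Downstream readers of the new field must tolerate `None` from older serializations. A minimal consumption sketch (not part of this diff; the helper name is illustrative):

```rust
use lightning::ln::channelmanager::{ChannelDetails, ChannelShutdownState};

/// Whether the channel has begun (or finished) shutting down. Returns `None`
/// when the `ChannelDetails` predates 0.0.116 and carries no shutdown state.
fn is_winding_down(details: &ChannelDetails) -> Option<bool> {
	details.channel_shutdown_state.map(|state| state != ChannelShutdownState::NotShuttingDown)
}
```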
@@ -1503,10 +1522,13 @@ impl ChannelDetails {
                self.short_channel_id.or(self.outbound_scid_alias)
        }
 
-       fn from_channel_context<Signer: WriteableEcdsaChannelSigner>(context: &ChannelContext<Signer>,
-               best_block_height: u32, latest_features: InitFeatures) -> Self {
-
-               let balance = context.get_available_balances();
+       fn from_channel_context<Signer: WriteableEcdsaChannelSigner, F: Deref>(
+               context: &ChannelContext<Signer>, best_block_height: u32, latest_features: InitFeatures,
+               fee_estimator: &LowerBoundedFeeEstimator<F>
+       ) -> Self
+       where F::Target: FeeEstimator
+       {
+               let balance = context.get_available_balances(fee_estimator);
                let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
                        context.get_holder_counterparty_selected_channel_reserve_satoshis();
                ChannelDetails {
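
Balance figures now depend on current feerates, hence the new `fee_estimator` parameter. For reference, a minimal `FeeEstimator` sketch like the one callers already hand to `ChannelManager::new` (fixed at 253 sats per 1000 weight, mirroring the bench below; the type name is illustrative):

```rust
use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

/// Illustrative fixed-rate estimator; real deployments should query a live fee source.
struct StaticFeeEstimator;

impl FeeEstimator for StaticFeeEstimator {
	fn get_est_sat_per_1000_weight(&self, _target: ConfirmationTarget) -> u32 {
		253 // the minimum relay feerate, in sats per 1000 weight units
	}
}
```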
@@ -1551,10 +1573,33 @@ impl ChannelDetails {
                        inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()),
                        inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
                        config: Some(context.config()),
+                       channel_shutdown_state: Some(context.shutdown_state()),
                }
        }
 }
 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+/// Further information on the state of the channel's shutdown.
+/// Upon a channel being force-closed (i.e. its commitment transaction confirmation has been
+/// detected by `ChainMonitor`), `ChannelShutdownState` will be set to `ShutdownComplete`, or
+/// the channel will be removed shortly thereafter.
+/// Also note that, in normal operation, peers may disconnect at any of these states
+/// and require re-connection before progress can be made to later states.
+pub enum ChannelShutdownState {
+       /// Channel has not sent or received a shutdown message.
+       NotShuttingDown,
+       /// Local node has sent a shutdown message for this channel.
+       ShutdownInitiated,
+       /// Shutdown message exchanges have concluded and the channel is in the midst of
+       /// resolving all existing open HTLCs before closing can continue.
+       ResolvingHTLCs,
+       /// All HTLCs have been resolved, and the nodes are currently negotiating the on-chain
+       /// fee rate for the channel-closing transaction.
+       NegotiatingClosingFee,
+       /// We've successfully negotiated a `closing_signed` dance. At this point, the
+       /// `ChannelManager` is about to drop the channel.
+       ShutdownComplete,
+}
+
 /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
 #[derive(Debug, PartialEq)]
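
As a usage sketch, the five states map directly onto progress reporting (function name is illustrative):

```rust
use lightning::ln::channelmanager::ChannelShutdownState;

/// Illustrative mapping from shutdown stage to a short status string.
fn shutdown_status(state: ChannelShutdownState) -> &'static str {
	match state {
		ChannelShutdownState::NotShuttingDown => "open",
		ChannelShutdownState::ShutdownInitiated => "shutdown sent, awaiting peer",
		ChannelShutdownState::ResolvingHTLCs => "waiting for open HTLCs to resolve",
		ChannelShutdownState::NegotiatingClosingFee => "negotiating the closing fee",
		ChannelShutdownState::ShutdownComplete => "closed, about to be removed",
	}
}
```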
@@ -2007,6 +2052,8 @@ where
 {
        /// Constructs a new `ChannelManager` to hold several channels and route between them.
        ///
+       /// The current time or the latest block header time can be provided as `current_timestamp`.
+       ///
        /// This is the main "logic hub" for all channel-related actions, and implements
        /// [`ChannelMessageHandler`].
        ///
@@ -2020,7 +2067,11 @@ where
        /// [`block_connected`]: chain::Listen::block_connected
        /// [`block_disconnected`]: chain::Listen::block_disconnected
        /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
-       pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self {
+       pub fn new(
+               fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES,
+               node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters,
+               current_timestamp: u32,
+       ) -> Self {
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
                let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material();
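
A sketch of deriving the new trailing argument from the system clock; per the doc comment above, a recent block header's `time` works as well (the updated bench below uses the genesis header's):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Seconds since the Unix epoch, truncated to the u32 `ChannelManager::new` expects.
fn current_timestamp() -> u32 {
	SystemTime::now()
		.duration_since(UNIX_EPOCH)
		.expect("system clock set before the Unix epoch")
		.as_secs() as u32
}
```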
@@ -2052,7 +2103,7 @@ where
 
                        probing_cookie_secret: entropy_source.get_secure_random_bytes(),
 
-                       highest_seen_timestamp: AtomicUsize::new(0),
+                       highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
 
                        per_peer_state: FairRwLock::new(HashMap::new()),
 
@@ -2192,7 +2243,7 @@ where
                                let peer_state = &mut *peer_state_lock;
                                for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                        }
@@ -2218,17 +2269,17 @@ where
                                let peer_state = &mut *peer_state_lock;
                                for (_channel_id, channel) in peer_state.channel_by_id.iter() {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                                for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                                for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                        }
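
The public effect is that `list_channels` output now reflects feerate-dependent commitment costs in its capacity fields. An illustrative summary over its return value:

```rust
use lightning::ln::channelmanager::ChannelDetails;

/// Print a one-line summary per channel from `ChannelManager::list_channels()`.
fn summarize(channels: &[ChannelDetails]) {
	for details in channels {
		println!(
			"channel {:?}: outbound capacity {} msat, shutdown state {:?}",
			details.channel_id, details.outbound_capacity_msat, details.channel_shutdown_state,
		);
	}
}
```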
@@ -2261,7 +2312,8 @@ where
                        return peer_state.channel_by_id
                                .iter()
                                .map(|(_, channel)|
-                                       ChannelDetails::from_channel_context(&channel.context, best_block_height, features.clone()))
+                                       ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                                       features.clone(), &self.fee_estimator))
                                .collect();
                }
                vec![]
@@ -3065,7 +3117,7 @@ where
                                                session_priv: session_priv.clone(),
                                                first_hop_htlc_msat: htlc_msat,
                                                payment_id,
-                                       }, onion_packet, None, &self.logger);
+                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
                                match break_chan_entry!(self, send_res, chan) {
                                        Some(monitor_update) => {
                                                match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
@@ -3782,7 +3834,8 @@ where
                                                                                });
                                                                                if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
                                                                                        payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                                       onion_packet, skimmed_fee_msat, &self.logger)
+                                                                                       onion_packet, skimmed_fee_msat, &self.fee_estimator,
+                                                                                       &self.logger)
                                                                                {
                                                                                        if let ChannelError::Ignore(msg) = e {
                                                                                                log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
@@ -4171,7 +4224,7 @@ where
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
                        log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 
-               chan.queue_update_fee(new_feerate, &self.logger);
+               chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
                NotifyOption::DoPersist
        }
 
@@ -5507,7 +5560,7 @@ where
                                                _ => pending_forward_info
                                        }
                                };
-                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan);
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -5724,7 +5777,7 @@ where
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        let funding_txo = chan.get().context.get_funding_txo();
-                                       let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+                                       let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan);
                                        let res = if let Some(monitor_update) = monitor_update_opt {
                                                handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
                                                        peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
@@ -5995,7 +6048,7 @@ where
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
-                                                       chan.maybe_free_holding_cell_htlcs(&self.logger);
+                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger);
                                                if !holding_cell_failed_htlcs.is_empty() {
                                                        failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
                                                }
@@ -7343,6 +7396,7 @@ impl Writeable for ChannelDetails {
                        (35, self.inbound_htlc_maximum_msat, option),
                        (37, user_channel_id_high_opt, option),
                        (39, self.feerate_sat_per_1000_weight, option),
+                       (41, self.channel_shutdown_state, option),
                });
                Ok(())
        }
@@ -7380,6 +7434,7 @@ impl Readable for ChannelDetails {
                        (35, inbound_htlc_maximum_msat, option),
                        (37, user_channel_id_high_opt, option),
                        (39, feerate_sat_per_1000_weight, option),
+                       (41, channel_shutdown_state, option),
                });
 
                // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -7415,6 +7470,7 @@ impl Readable for ChannelDetails {
                        inbound_htlc_minimum_msat,
                        inbound_htlc_maximum_msat,
                        feerate_sat_per_1000_weight,
+                       channel_shutdown_state,
                })
        }
 }
@@ -7965,6 +8021,14 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
        }
 }
 
+impl_writeable_tlv_based_enum!(ChannelShutdownState,
+       (0, NotShuttingDown) => {},
+       (2, ShutdownInitiated) => {},
+       (4, ResolvingHTLCs) => {},
+       (6, NegotiatingClosingFee) => {},
+       (8, ShutdownComplete) => {}, ;
+);
+
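
The new field is written under odd TLV type 41, so pre-0.0.116 readers skip it, and reading pre-0.0.116 data yields `None`. A round-trip sketch under that assumption:

```rust
use std::io::Cursor;

use lightning::ln::channelmanager::ChannelDetails;
use lightning::util::ser::{Readable, Writeable};

/// Serialize and re-read a `ChannelDetails`; `channel_shutdown_state` survives
/// the round trip on 0.0.116+ and reads back as `None` from older bytes.
fn roundtrip(details: &ChannelDetails) -> ChannelDetails {
	let bytes = details.encode(); // `Writeable::encode` collects into a Vec<u8>
	Readable::read(&mut Cursor::new(bytes)).expect("just-encoded details must parse")
}
```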
 /// Arguments for the creation of a ChannelManager that are not deserialized.
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
@@ -10001,7 +10065,7 @@ pub mod bench {
        use crate::routing::gossip::NetworkGraph;
        use crate::routing::router::{PaymentParameters, RouteParameters};
        use crate::util::test_utils;
-       use crate::util::config::UserConfig;
+       use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
@@ -10039,6 +10103,7 @@ pub mod bench {
                // Note that this is unrealistic as each payment send will require at least two fsync
                // calls per node.
                let network = bitcoin::Network::Testnet;
+               let genesis_block = bitcoin::blockdata::constants::genesis_block(network);
 
                let tx_broadcaster = test_utils::TestBroadcaster::new(network);
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
@@ -10047,6 +10112,7 @@ pub mod bench {
                let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
 
                let mut config: UserConfig = Default::default();
+               config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
                config.channel_handshake_config.minimum_depth = 1;
 
                let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
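
A rough check on the new bench value, assuming `FeeRateMultiplier` caps dust exposure at the current feerate (in sats per 1000 weight) times the multiplier, in msats: with the bench's fixed 253 sat/kW estimator, 253 × (5_000_000 / 253) = 253 × 19_762 ≈ 5,000,000 msat, reproducing the previous fixed 5,000,000 msat limit.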
@@ -10055,7 +10121,7 @@ pub mod bench {
                let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
                        network,
                        best_block: BestBlock::from_network(network),
-               });
+               }, genesis_block.header.time);
                let node_a_holder = ANodeHolder { node: &node_a };
 
                let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
@@ -10065,7 +10131,7 @@ pub mod bench {
                let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
                        network,
                        best_block: BestBlock::from_network(network),
-               });
+               }, genesis_block.header.time);
                let node_b_holder = ANodeHolder { node: &node_b };
 
                node_a.peer_connected(&node_b.get_our_node_id(), &Init {