Ensure a 1:1 mapping of value sendable to send success in fuzzing
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 2e17664ea882d9a61e1b939fe70862365449a5d2..b5e7d603411f259ff3b9dda02c9f153661717cc8 100644
@@ -45,8 +45,8 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, No
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -55,7 +55,7 @@ use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VA
 use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
 use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
 use crate::util::config::{UserConfig, ChannelConfig};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
@@ -78,6 +78,7 @@ use core::ops::Deref;
 
 // Re-export this for use in the public API.
 pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+use crate::ln::script::ShutdownScript;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -500,9 +501,11 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-       /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
-       /// commitment transaction.
-       ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+       /// Handle a ChannelMonitorUpdate
+       ///
+       /// Note that any such events are lost on shutdown, so in general they must be updates which
+       /// are regenerated on startup.
+       MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
 }
 
 #[derive(Debug)]
@@ -622,7 +625,9 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
        Arc<DefaultRouter<
                Arc<NetworkGraph<Arc<L>>>,
                Arc<L>,
-               Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+               Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+               ProbabilisticScoringFeeParameters,
+               ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
        >>,
        Arc<L>
 >;
@@ -638,7 +643,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
 /// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
 
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
 #[cfg(any(test, feature = "_test_utils"))]
@@ -1269,8 +1274,14 @@ pub struct ChannelDetails {
        /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
        /// to use a limit as close as possible to the HTLC limit we can currently send.
        ///
-       /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+       /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+       /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
        pub next_outbound_htlc_limit_msat: u64,
+       /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+       /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+       /// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+       /// route which is valid.
+       pub next_outbound_htlc_minimum_msat: u64,
        /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
        /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new inbound HTLCs).
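
The new `next_outbound_htlc_minimum_msat` field is what the commit title refers to: pairing a lower bound with the existing `next_outbound_htlc_limit_msat` upper bound lets callers (and the fuzzer) only attempt amounts that can actually be sent as a single HTLC. A minimal, self-contained sketch of that filtering, using a local stand-in struct rather than the real `ChannelDetails`:

```rust
// Stand-in struct; the real fields live on ChannelDetails as shown in the hunk above.
struct HtlcBounds {
    next_outbound_htlc_minimum_msat: u64,
    next_outbound_htlc_limit_msat: u64,
}

// A channel can carry `amount_msat` as one HTLC only if it falls inside both bounds.
fn can_send_over(chan: &HtlcBounds, amount_msat: u64) -> bool {
    amount_msat >= chan.next_outbound_htlc_minimum_msat
        && amount_msat <= chan.next_outbound_htlc_limit_msat
}

fn main() {
    let chan = HtlcBounds {
        next_outbound_htlc_minimum_msat: 1_000,
        next_outbound_htlc_limit_msat: 500_000,
    };
    assert!(can_send_over(&chan, 10_000));
    assert!(!can_send_over(&chan, 999)); // below the counterparty's HTLC minimum
}
```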
@@ -1390,6 +1401,7 @@ impl ChannelDetails {
                        inbound_capacity_msat: balance.inbound_capacity_msat,
                        outbound_capacity_msat: balance.outbound_capacity_msat,
                        next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+                       next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
                        user_channel_id: channel.get_user_id(),
                        confirmations_required: channel.minimum_depth(),
                        confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
@@ -1437,7 +1449,7 @@ pub enum RecentPaymentDetails {
 
 /// Route hints used in constructing invoices for [phantom node payments].
 ///
-/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+/// [phantom node payments]: crate::sign::PhantomKeysManager
 #[derive(Clone)]
 pub struct PhantomRouteHints {
        /// The list of channels to be included in the invoice route hints.
@@ -1885,6 +1897,10 @@ where
        /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
        /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
        ///
+       /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be opened due to failing to
+       /// generate a shutdown scriptpubkey or destination script set by
+       /// [`SignerProvider::get_shutdown_scriptpubkey`] or [`SignerProvider::get_destination_script`].
+       ///
        /// Note that we do not check if you are currently connected to the given peer. If no
        /// connection is available, the outbound `open_channel` message may fail to send, resulting in
        /// the channel eventually being silently forgotten (dropped on reload).
@@ -2057,7 +2073,7 @@ where
                }, None));
        }
 
-       fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
+       fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
@@ -2074,7 +2090,7 @@ where
                                        let funding_txo_opt = chan_entry.get().get_funding_txo();
                                        let their_features = &peer_state.latest_features;
                                        let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
-                                               .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
+                                               .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
                                        failed_htlcs = htlcs;
 
                                        // We can send the `shutdown` message before updating the `ChannelMonitor`
@@ -2131,12 +2147,17 @@ where
        ///
        /// May generate a [`SendShutdown`] message event on success, which should be relayed.
        ///
+       /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+       /// generate a shutdown scriptpubkey or destination script set by
+       /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+       /// channel.
+       ///
        /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
        /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
        pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
-               self.close_channel_internal(channel_id, counterparty_node_id, None)
+               self.close_channel_internal(channel_id, counterparty_node_id, None, None)
        }
 
        /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
@@ -2153,14 +2174,24 @@ where
        ///    transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
        ///    will appear on a force-closure transaction, whichever is lower).
        ///
+       /// The `shutdown_script` provided will be used as the `scriptPubKey` for the closing transaction.
+       /// Will fail if a shutdown script has already been set for this channel by
+       /// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]. The given shutdown script must
+       /// also be compatible with our and the counterparty's features.
+       ///
        /// May generate a [`SendShutdown`] message event on success, which should be relayed.
        ///
+       /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+       /// generate a shutdown scriptpubkey or destination script set by
+       /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+       /// channel.
+       ///
        /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
        /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
-       pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
-               self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
+       pub fn close_channel_with_feerate_and_script(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+               self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
        }
 
        #[inline]
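
For callers, the old `close_channel_with_target_feerate` behaviour is now spelled `close_channel_with_feerate_and_script(.., Some(feerate), None)`, and plain `close_channel` simply passes `None, None`. A compile-standalone sketch of that mapping (the trait and simplified types below are stand-ins so the example does not need a full `ChannelManager`; only the argument shape is taken from this diff):

```rust
// Stand-ins; the real method lives on ChannelManager and returns Result<(), APIError>.
type ChannelId = [u8; 32];
type NodeId = [u8; 33]; // stand-in for secp256k1::PublicKey

trait CoopClose {
    fn close_channel_with_feerate_and_script(
        &self, channel_id: &ChannelId, counterparty_node_id: &NodeId,
        target_feerate_sats_per_1000_weight: Option<u32>,
        shutdown_script: Option<Vec<u8>>, // stand-in for ln::script::ShutdownScript
    ) -> Result<(), String>;

    // Equivalent of the removed `close_channel_with_target_feerate`.
    fn close_channel_with_target_feerate(
        &self, channel_id: &ChannelId, counterparty_node_id: &NodeId, feerate: u32,
    ) -> Result<(), String> {
        self.close_channel_with_feerate_and_script(channel_id, counterparty_node_id, Some(feerate), None)
    }
}
```

Passing `Some(script)` instead requests a specific closing `scriptPubKey`, and per the new docs can fail with `APIError::ChannelUnavailable` if a script was already fixed via `commit_upfront_shutdown_pubkey` or is incompatible with the negotiated features.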
@@ -2708,10 +2739,9 @@ where
                let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
                        .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
                let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
-               if onion_utils::route_size_insane(&onion_payloads) {
-                       return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
-               }
-               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
+               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+                       .map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
 
                let err: Result<(), _> = loop {
                        let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
@@ -3070,6 +3100,12 @@ where
                        }
                }
                self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
+                       if tx.output.len() > u16::max_value() as usize {
+                               return Err(APIError::APIMisuseError {
+                                       err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+                               });
+                       }
+
                        let mut output_index = None;
                        let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
                        for (idx, outp) in tx.output.iter().enumerate() {
@@ -3079,11 +3115,6 @@ where
                                                        err: "Multiple outputs matched the expected script and value".to_owned()
                                                });
                                        }
-                                       if idx > u16::max_value() as usize {
-                                               return Err(APIError::APIMisuseError {
-                                                       err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
-                                               });
-                                       }
                                        output_index = Some(idx as u16);
                                }
                        }
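
Moving the output-count check out of the matching loop means a funding transaction with more than 2^16 outputs is rejected up front, rather than only when the oversized index happens to be the one matching the funding script. A self-contained mirror of the reordered logic (simplified to compare script strings; the value check is omitted in this sketch):

```rust
fn find_funding_output(output_spks: &[String], expected_spk: &str) -> Result<u16, String> {
    // Checked before scanning: the output index must fit in the u16 used by the funding outpoint.
    if output_spks.len() > u16::max_value() as usize {
        return Err("Transaction had more than 2^16 outputs, which is not supported".to_owned());
    }
    let mut output_index = None;
    for (idx, spk) in output_spks.iter().enumerate() {
        if spk == expected_spk {
            if output_index.is_some() {
                return Err("Multiple outputs matched the expected script and value".to_owned());
            }
            output_index = Some(idx as u16);
        }
    }
    output_index.ok_or_else(|| "No output matched the expected script".to_owned())
}
```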
@@ -3753,7 +3784,7 @@ where
 
                for event in background_events.drain(..) {
                        match event {
-                               BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
                                        // The channel has already been closed, so no use bothering to care about the
                                        // monitor updating completing.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -4493,7 +4524,7 @@ where
 
                if let Some(tx) = funding_broadcastable {
                        log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
-                       self.tx_broadcaster.broadcast_transaction(&tx);
+                       self.tx_broadcaster.broadcast_transactions(&[&tx]);
                }
 
                {
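
The broadcaster call sites now hand over a slice of transactions. Assuming the corresponding trait method is `BroadcasterInterface::broadcast_transactions(&self, txs: &[&Transaction])` (inferred from these call sites; the trait change itself is not shown in this diff), an implementor looks roughly like:

```rust
use bitcoin::Transaction;
use lightning::chain::chaininterface::BroadcasterInterface;

struct LoggingBroadcaster;

impl BroadcasterInterface for LoggingBroadcaster {
    fn broadcast_transactions(&self, txs: &[&Transaction]) {
        // Batching lets related transactions (e.g. a parent and its fee-bumping child)
        // reach the backend in one call; here we only log the txids.
        for tx in txs {
            println!("would broadcast {}", tx.txid());
        }
    }
}
```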
@@ -5023,7 +5054,7 @@ where
                };
                if let Some(broadcast_tx) = tx {
                        log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
-                       self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+                       self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
                }
                if let Some(chan) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
@@ -5633,7 +5664,7 @@ where
                                                                self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
 
                                                                log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-                                                               self.tx_broadcaster.broadcast_transaction(&tx);
+                                                               self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                update_maps_on_chan_removal!(self, chan);
                                                                false
                                                        } else { true }
@@ -5673,7 +5704,7 @@ where
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
                                } else { unreachable!(); }
-                               self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+                               self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
                        }
                        self.finish_force_close_channel(failure);
                }
@@ -5844,7 +5875,7 @@ where
        /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
        /// are used when constructing the phantom invoice's route hints.
        ///
-       /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+       /// [phantom node payments]: crate::sign::PhantomKeysManager
        pub fn get_phantom_scid(&self) -> u64 {
                let best_block_height = self.best_block.read().unwrap().height();
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
@@ -5860,7 +5891,7 @@ where
 
        /// Gets route hints for use in receiving [phantom node payments].
        ///
-       /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+       /// [phantom node payments]: crate::sign::PhantomKeysManager
        pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
                PhantomRouteHints {
                        channels: self.list_usable_channels(),
@@ -6468,11 +6499,23 @@ where
                let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
        }
 
+       fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.temporary_channel_id.clone())), *counterparty_node_id);
+       }
+
        fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
        }
 
+       fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.temporary_channel_id.clone())), *counterparty_node_id);
+       }
+
        fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
@@ -6575,23 +6618,40 @@ where
                                });
                                pending_msg_events.retain(|msg| {
                                        match msg {
+                                               // V1 Channel Establishment
                                                &events::MessageSendEvent::SendAcceptChannel { .. } => false,
                                                &events::MessageSendEvent::SendOpenChannel { .. } => false,
                                                &events::MessageSendEvent::SendFundingCreated { .. } => false,
                                                &events::MessageSendEvent::SendFundingSigned { .. } => false,
+                                               // V2 Channel Establishment
+                                               &events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
+                                               &events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
+                                               // Common Channel Establishment
                                                &events::MessageSendEvent::SendChannelReady { .. } => false,
                                                &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+                                               // Interactive Transaction Construction
+                                               &events::MessageSendEvent::SendTxAddInput { .. } => false,
+                                               &events::MessageSendEvent::SendTxAddOutput { .. } => false,
+                                               &events::MessageSendEvent::SendTxRemoveInput { .. } => false,
+                                               &events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
+                                               &events::MessageSendEvent::SendTxComplete { .. } => false,
+                                               &events::MessageSendEvent::SendTxSignatures { .. } => false,
+                                               &events::MessageSendEvent::SendTxInitRbf { .. } => false,
+                                               &events::MessageSendEvent::SendTxAckRbf { .. } => false,
+                                               &events::MessageSendEvent::SendTxAbort { .. } => false,
+                                               // Channel Operations
                                                &events::MessageSendEvent::UpdateHTLCs { .. } => false,
                                                &events::MessageSendEvent::SendRevokeAndACK { .. } => false,
                                                &events::MessageSendEvent::SendClosingSigned { .. } => false,
                                                &events::MessageSendEvent::SendShutdown { .. } => false,
                                                &events::MessageSendEvent::SendChannelReestablish { .. } => false,
+                                               &events::MessageSendEvent::HandleError { .. } => false,
+                                               // Gossip
                                                &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
                                                &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
                                                &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
                                                &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                                &events::MessageSendEvent::SendChannelUpdate { .. } => false,
-                                               &events::MessageSendEvent::HandleError { .. } => false,
                                                &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                                &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
                                                &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
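
The widened `retain` keeps the same policy as before, now spelled out per message group: anything addressed to the disconnecting peer (including the new V2-establishment and interactive-transaction-construction messages) is dropped and will be regenerated on reconnect, while node-wide broadcasts stay queued. A tiny stand-in of that filter:

```rust
// Local stand-in enum, not LDK's MessageSendEvent.
enum PendingMsg { ToPeer, Broadcast }

fn keep_on_peer_disconnect(pending: Vec<PendingMsg>) -> Vec<PendingMsg> {
    // Per-peer messages are dropped (the channel state machine re-sends what it needs on
    // reconnect); broadcast gossip is kept since it is not tied to the peer.
    pending.into_iter().filter(|m| matches!(m, PendingMsg::Broadcast)).collect()
}

fn main() {
    let pending = vec![PendingMsg::ToPeer, PendingMsg::Broadcast];
    assert_eq!(keep_on_peer_disconnect(pending).len(), 1);
}
```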
@@ -6748,6 +6808,60 @@ where
        fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures {
                provided_init_features(&self.default_configuration)
        }
+
+       fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_add_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddOutput) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_remove_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveInput) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_remove_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveOutput) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_complete(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxComplete) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_init_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxInitRbf) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_ack_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAckRbf) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
+
+       fn handle_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) {
+               let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+                       "Dual-funded channels not supported".to_owned(),
+                        msg.channel_id.clone())), *counterparty_node_id);
+       }
 }
 
 /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
@@ -6785,7 +6899,7 @@ pub fn provided_init_features(_config: &UserConfig) -> InitFeatures {
        // should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
        // [`ErroringMessageHandler`].
        let mut features = InitFeatures::empty();
-       features.set_data_loss_protect_optional();
+       features.set_data_loss_protect_required();
        features.set_upfront_shutdown_script_optional();
        features.set_variable_length_onion_required();
        features.set_static_remote_key_required();
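
Flipping `data_loss_protect` from optional to required switches the advertised bit from the odd to the even position, so a compliant counterparty that does not understand the feature must disconnect rather than silently ignore it. A hedged sketch of inspecting that advertisement (the `supports_*`/`requires_*` helpers follow the same generated pattern as the setters above; treat the exact names as assumptions):

```rust
use lightning::ln::features::InitFeatures;

// Reports how a feature set advertises data_loss_protect: (supported at all, required of
// peers). After this change, `provided_init_features` yields (true, true) rather than
// (true, false).
fn data_loss_protect_advertisement(features: &InitFeatures) -> (bool, bool) {
    (features.supports_data_loss_protect(), features.requires_data_loss_protect())
}
```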
@@ -6844,10 +6958,9 @@ impl Writeable for ChannelDetails {
                        (14, user_channel_id_low, required),
                        (16, self.balance_msat, required),
                        (18, self.outbound_capacity_msat, required),
-                       // Note that by the time we get past the required read above, outbound_capacity_msat will be
-                       // filled in, so we can safely unwrap it here.
-                       (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+                       (19, self.next_outbound_htlc_limit_msat, required),
                        (20, self.inbound_capacity_msat, required),
+                       (21, self.next_outbound_htlc_minimum_msat, required),
                        (22, self.confirmations_required, option),
                        (24, self.force_close_spend_delay, option),
                        (26, self.is_outbound, required),
@@ -6884,6 +6997,7 @@ impl Readable for ChannelDetails {
                        // filled in, so we can safely unwrap it here.
                        (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
                        (20, inbound_capacity_msat, required),
+                       (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
                        (22, confirmations_required, option),
                        (24, force_close_spend_delay, option),
                        (26, is_outbound, required),
@@ -6917,6 +7031,7 @@ impl Readable for ChannelDetails {
                        balance_msat: balance_msat.0.unwrap(),
                        outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
                        next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+                       next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
                        inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
                        confirmations_required,
                        confirmations,
@@ -7148,8 +7263,10 @@ impl Readable for HTLCSource {
                                        return Err(DecodeError::InvalidValue);
                                }
                                if let Some(params) = payment_params.as_mut() {
-                                       if params.final_cltv_expiry_delta == 0 {
-                                               params.final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
+                                       if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
+                                               if final_cltv_expiry_delta == &0 {
+                                                       *final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
+                                               }
                                        }
                                }
                                Ok(HTLCSource::OutboundRoute {
@@ -7342,17 +7459,12 @@ where
                        }
                }
 
-               let background_events = self.pending_background_events.lock().unwrap();
-               (background_events.len() as u64).write(writer)?;
-               for event in background_events.iter() {
-                       match event {
-                               BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
-                                       0u8.write(writer)?;
-                                       funding_txo.write(writer)?;
-                                       monitor_update.write(writer)?;
-                               },
-                       }
-               }
+               // LDK versions prior to 0.0.116 wrote the `pending_background_events`
+               // `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
+               // the closing monitor updates were always effectively replayed on startup (either directly
+               // by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+               // deserialization or, in 0.0.115, by regenerating the monitor update itself).
+               0u64.write(writer)?;
 
                // Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
                // `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
@@ -7646,14 +7758,11 @@ where
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
-                               if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
-                                               channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
-                                               channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
-                                               channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
+                               if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
                                        // If the channel is ahead of the monitor, return InvalidValue:
                                        log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
                                        log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -7670,7 +7779,7 @@ where
                                                log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        if let Some(monitor_update) = monitor_update {
-                                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+                                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
                                        }
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        channel_closures.push_back((events::Event::ChannelClosed {
@@ -7739,11 +7848,13 @@ where
 
                for (funding_txo, _) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
+                               log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
+                                       log_bytes!(funding_txo.to_channel_id()));
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                                };
-                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
+                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
                        }
                }
 
@@ -7800,13 +7911,11 @@ where
                for _ in 0..background_event_count {
                        match <u8 as Readable>::read(reader)? {
                                0 => {
-                                       let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
-                                       if pending_background_events.iter().find(|e| {
-                                               let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
-                                               *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
-                                       }).is_none() {
-                                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
-                                       }
+                                       // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
+                                       // however we really don't (and never did) need them - we regenerate all
+                                       // on-startup monitor updates.
+                                       let _: OutPoint = Readable::read(reader)?;
+                                       let _: ChannelMonitorUpdate = Readable::read(reader)?;
                                }
                                _ => return Err(DecodeError::InvalidValue),
                        }
@@ -8253,7 +8362,7 @@ mod tests {
        use crate::util::errors::APIError;
        use crate::util::test_utils;
        use crate::util::config::ChannelConfig;
-       use crate::chain::keysinterface::EntropySource;
+       use crate::sign::EntropySource;
 
        #[test]
        fn test_notify_limits() {
@@ -8484,7 +8593,7 @@ mod tests {
                };
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -8518,7 +8627,7 @@ mod tests {
                let payment_preimage = PaymentPreimage([42; 32]);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
                let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -8581,7 +8690,7 @@ mod tests {
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &random_seed_bytes
+                       nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);
@@ -8625,7 +8734,7 @@ mod tests {
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &random_seed_bytes
+                       nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);
@@ -9165,11 +9274,11 @@ mod tests {
        }
 }
 
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
 pub mod bench {
        use crate::chain::Listen;
        use crate::chain::chainmonitor::{ChainMonitor, Persist};
-       use crate::chain::keysinterface::{KeysManager, InMemorySigner};
+       use crate::sign::{KeysManager, InMemorySigner};
        use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
        use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
        use crate::ln::functional_test_utils::*;
@@ -9185,7 +9294,7 @@ pub mod bench {
 
        use crate::sync::{Arc, Mutex};
 
-       use test::Bencher;
+       use criterion::Criterion;
 
        type Manager<'a, P> = ChannelManager<
                &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
@@ -9206,13 +9315,11 @@ pub mod bench {
                fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
        }
 
-       #[cfg(test)]
-       #[bench]
-       fn bench_sends(bench: &mut Bencher) {
-               bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+       pub fn bench_sends(bench: &mut Criterion) {
+               bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
        }
 
-       pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+       pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
                // Do a simple benchmark of sending a payment back and forth between two nodes.
                // Note that this is unrealistic as each payment send will require at least two fsync
                // calls per node.
@@ -9282,10 +9389,7 @@ pub mod bench {
 
                assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
 
-               let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-                       txdata: vec![tx],
-               };
+               let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
 
@@ -9326,7 +9430,7 @@ pub mod bench {
                macro_rules! send_payment {
                        ($node_a: expr, $node_b: expr) => {
                                let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
-                                       .with_features($node_b.invoice_features());
+                                       .with_bolt11_features($node_b.invoice_features()).unwrap();
                                let mut payment_preimage = PaymentPreimage([0; 32]);
                                payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
                                payment_count += 1;
@@ -9368,9 +9472,9 @@ pub mod bench {
                        }
                }
 
-               bench.iter(|| {
+               bench.bench_function(bench_name, |b| b.iter(|| {
                        send_payment!(node_a, node_b);
                        send_payment!(node_b, node_a);
-               });
+               }));
        }
 }
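
With the module now driven by `&mut Criterion` rather than the unstable `#[bench]`/`test::Bencher` pair, the benchmark is registered from an external criterion harness instead of `cargo bench`'s built-in runner. A hypothetical harness file (the exact crate layout and the `ldk_bench` cfg plumbing are assumptions, not shown in this diff):

```rust
// e.g. benches/bench.rs in a crate that depends on `lightning` built with the ldk_bench cfg
use criterion::{criterion_group, criterion_main};
use lightning::ln::channelmanager::bench::bench_sends;

criterion_group!(benches, bench_sends);
criterion_main!(benches);
```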