Merge pull request #2059 from wpaulino/broadcast-missing-anchors-event
authorMatt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 29 Mar 2023 21:54:58 +0000 (21:54 +0000)
committerGitHub <noreply@github.com>
Wed, 29 Mar 2023 21:54:58 +0000 (21:54 +0000)
Queue BackgroundEvent to force close channels upon ChannelManager::read

1  2 
lightning-persister/src/lib.rs
lightning/src/chain/channelmonitor.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs

index 3bdf444dd99c1b5d37da31e5c67073e723a48095,01cb4081ccc745d7775b86db079dcf25d49610ec..e6687fef7b8a24a2e6e9cd2cefd8b73f77adbcb4
@@@ -141,10 -141,11 +141,11 @@@ mod tests 
        use bitcoin::{Txid, TxMerkleNode};
        use lightning::chain::ChannelMonitorUpdateStatus;
        use lightning::chain::chainmonitor::Persist;
+       use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
        use lightning::chain::transaction::OutPoint;
        use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
 +      use lightning::events::{ClosureReason, MessageSendEventsProvider};
        use lightning::ln::functional_test_utils::*;
 -      use lightning::util::events::{ClosureReason, MessageSendEventsProvider};
        use lightning::util::test_utils;
        use std::fs;
        use bitcoin::hashes::Hash;
                check_added_monitors!(nodes[1], 1);
  
                // Make sure everything is persisted as expected after close.
-               check_persisted_data!(11);
+               check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
        }
  
        // Test that if the persister's path to channel data is read-only, writing a
index 9044eec63e4a3c5b4a23700647c571d38f51cb4e,4e6c67982f71f27505ee7b6eccdb75b11ef4fcde..20a9ebf66e0980baeabaa214c2af12ccea11ecb6
@@@ -51,9 -51,9 +51,9 @@@ use crate::chain::Filter
  use crate::util::logger::Logger;
  use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
  use crate::util::byte_utils;
 -use crate::util::events::Event;
 +use crate::events::Event;
  #[cfg(anchors)]
 -use crate::util::events::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent};
 +use crate::events::bump_transaction::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent};
  
  use crate::prelude::*;
  use core::{cmp, mem};
@@@ -69,34 -69,36 +69,36 @@@ use crate::sync::{Mutex, LockTestExt}
  /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
  /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
  /// transaction), a single update may reach upwards of 1 MiB in serialized size.
- #[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
- #[derive(Clone)]
+ #[derive(Clone, PartialEq, Eq)]
  #[must_use]
  pub struct ChannelMonitorUpdate {
        pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
        /// The sequence number of this update. Updates *must* be replayed in-order according to this
        /// sequence number (and updates may panic if they are not). The update_id values are strictly
-       /// increasing and increase by one for each new update, with one exception specified below.
+       /// increasing and increase by one for each new update, with two exceptions specified below.
        ///
        /// This sequence number is also used to track up to which points updates which returned
        /// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
        /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
        ///
-       /// The only instance where update_id values are not strictly increasing is the case where we
-       /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
-       /// its docs for more details.
+       /// The only instances we allow where update_id values are not strictly increasing have a
+       /// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. This update ID is used for updates that
+       /// will force close the channel by broadcasting the latest commitment transaction or
+       /// special post-force-close updates, like providing preimages necessary to claim outputs on the
+       /// broadcast commitment transaction. See its docs for more details.
        ///
        /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
        pub update_id: u64,
  }
  
- /// If:
- ///    (1) a channel has been force closed and
- ///    (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
- ///        this channel's (the backward link's) broadcasted commitment transaction
- /// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
- /// with the update providing said payment preimage. No other update types are allowed after
- /// force-close.
+ /// The update ID used for a [`ChannelMonitorUpdate`] that is either:
+ ///
+ ///   (1) attempting to force close the channel by broadcasting our latest commitment transaction or
+ ///   (2) providing a preimage (after the channel has been force closed) from a forward link that
+ ///           allows us to spend an HTLC output on this channel's (the backward link's) broadcasted
+ ///           commitment transaction.
+ ///
+ /// No other [`ChannelMonitorUpdate`]s are allowed after force-close.
  pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
  
  impl Writeable for ChannelMonitorUpdate {
@@@ -488,8 -490,7 +490,7 @@@ impl_writeable_tlv_based_enum_upgradabl
  
  );
  
- #[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
- #[derive(Clone)]
+ #[derive(Clone, PartialEq, Eq)]
  pub(crate) enum ChannelMonitorUpdateStep {
        LatestHolderCommitmentTXInfo {
                commitment_tx: HolderCommitmentTransaction,
@@@ -1201,17 -1202,6 +1202,6 @@@ impl<Signer: WriteableEcdsaChannelSigne
                        payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
        }
  
-       pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(
-               &self,
-               broadcaster: &B,
-               logger: &L,
-       ) where
-               B::Target: BroadcasterInterface,
-               L::Target: Logger,
-       {
-               self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger);
-       }
        /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
        /// itself.
        ///
        /// This is called by the [`EventsProvider::process_pending_events`] implementation for
        /// [`ChainMonitor`].
        ///
 -      /// [`EventsProvider::process_pending_events`]: crate::util::events::EventsProvider::process_pending_events
 +      /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
        /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
        pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
                self.inner.lock().unwrap().get_and_clear_pending_events()
@@@ -2265,14 -2255,22 +2255,22 @@@ impl<Signer: WriteableEcdsaChannelSigne
        {
                log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
                        log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
-               // ChannelMonitor updates may be applied after force close if we receive a
-               // preimage for a broadcasted commitment transaction HTLC output that we'd
-               // like to claim on-chain. If this is the case, we no longer have guaranteed
-               // access to the monitor's update ID, so we use a sentinel value instead.
+               // ChannelMonitor updates may be applied after force close if we receive a preimage for a
+               // broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+               // is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+               // sentinel value instead.
+               //
+               // The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+               // thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+               // them as well.
                if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
                        assert_eq!(updates.updates.len(), 1);
                        match updates.updates[0] {
-                               ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+                               ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
+                               // We should have already seen a `ChannelForceClosed` update if we're trying to
+                               // provide a preimage at this point.
+                               ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
+                                       debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID),
                                _ => {
                                        log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
                                        panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
                                },
                        }
                }
+               // If the updates succeeded and we were in an already closed channel state, then there's no
+               // need to refuse any updates we expect to receive after seeing a confirmed commitment.
+               if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id {
+                       return Ok(());
+               }
                self.latest_update_id = updates.update_id;
  
                if ret.is_ok() && self.funding_spend_seen {
                                        }));
                                },
                                ClaimEvent::BumpHTLC {
 -                                      target_feerate_sat_per_1000_weight, htlcs,
 +                                      target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
                                } => {
                                        let mut htlc_descriptors = Vec::with_capacity(htlcs.len());
                                        for htlc in htlcs {
                                        ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
                                                target_feerate_sat_per_1000_weight,
                                                htlc_descriptors,
 +                                              tx_lock_time,
                                        }));
                                }
                        }
@@@ -4006,7 -4010,6 +4011,7 @@@ mod tests 
        use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
        use crate::chain::transaction::OutPoint;
        use crate::chain::keysinterface::InMemorySigner;
 +      use crate::events::ClosureReason;
        use crate::ln::{PaymentPreimage, PaymentHash};
        use crate::ln::chan_utils;
        use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
        use crate::ln::functional_test_utils::*;
        use crate::ln::script::ShutdownScript;
        use crate::util::errors::APIError;
 -      use crate::util::events::ClosureReason;
        use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
        use crate::util::ser::{ReadableArgs, Writeable};
        use crate::sync::{Arc, Mutex};
index f470469426e206ece9fd81785eaeed6ff1e6644a,ae45f5e8fb61550880ee81336953cd7d9d69c078..f1c5cae25ae8bbfbc84b85640c3367de18aa56c1
@@@ -33,11 -33,11 +33,11 @@@ use crate::ln::chan_utils
  use crate::ln::onion_utils::HTLCFailReason;
  use crate::chain::BestBlock;
  use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
- use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
+ use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
  use crate::chain::transaction::{OutPoint, TransactionData};
  use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
 +use crate::events::ClosureReason;
  use crate::routing::gossip::NodeId;
 -use crate::util::events::ClosureReason;
  use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
  use crate::util::logger::Logger;
  use crate::util::errors::APIError;
@@@ -6081,7 -6081,7 +6081,7 @@@ impl<Signer: WriteableEcdsaChannelSigne
                        // monitor update to the user, even if we return one).
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
                        if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
-                               self.latest_monitor_update_id += 1;
+                               self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
                                Some((funding_txo, ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
index c8adc721235470dc37bce5a9a2651fc8e74dc8ed,2c62cc74d698212b4bc918a4a383859d282746c7..de12812544aa795bf0f8e5e3f7f2c1283d9a85ac
@@@ -35,8 -35,6 +35,8 @@@ use crate::chain::{Confirm, ChannelMoni
  use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
  use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
  use crate::chain::transaction::{OutPoint, TransactionData};
 +use crate::events;
 +use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
  // Since this struct is returned in `list_channels` methods, expose it here in case users want to
  // construct one themselves.
  use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
@@@ -57,9 -55,10 +57,9 @@@ use crate::ln::outbound_payment::{Outbo
  use crate::ln::wire::Encode;
  use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
  use crate::util::config::{UserConfig, ChannelConfig};
 -use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 -use crate::util::events;
  use crate::util::wakers::{Future, Notifier};
  use crate::util::scid_utils::fake_scid;
 +use crate::util::string::UntrustedString;
  use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
  use crate::util::logger::{Level, Logger};
  use crate::util::errors::APIError;
@@@ -120,10 -119,7 +120,10 @@@ pub(super) struct PendingHTLCInfo 
        pub(super) routing: PendingHTLCRouting,
        pub(super) incoming_shared_secret: [u8; 32],
        payment_hash: PaymentHash,
 +      /// Amount received
        pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
 +      /// Sender intended amount to forward or receive (actual amount received
 +      /// may overshoot this in either case)
        pub(super) outgoing_amt_msat: u64,
        pub(super) outgoing_cltv_value: u32,
  }
@@@ -195,21 -191,14 +195,21 @@@ struct ClaimableHTLC 
        cltv_expiry: u32,
        /// The amount (in msats) of this MPP part
        value: u64,
 +      /// The amount (in msats) that the sender intended to be sent in this MPP
 +      /// part (used for validating total MPP amount)
 +      sender_intended_value: u64,
        onion_payload: OnionPayload,
        timer_ticks: u8,
 -      /// The sum total of all MPP parts
 +      /// The total value received for a payment (sum of all MPP parts if the payment is a MPP).
 +      /// Gets set to the amount reported when pushing [`Event::PaymentClaimable`].
 +      total_value_received: Option<u64>,
 +      /// The sender intended sum total of all MPP parts specified in the onion
        total_msat: u64,
  }
  
  /// A payment identifier used to uniquely identify a payment to LDK.
 -/// (C-not exported) as we just use [u8; 32] directly
 +///
 +/// This is not exported to bindings users as we just use [u8; 32] directly
  #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
  pub struct PaymentId(pub [u8; 32]);
  
@@@ -227,8 -216,7 +227,8 @@@ impl Readable for PaymentId 
  }
  
  /// An identifier used to uniquely identify an intercepted HTLC to LDK.
 -/// (C-not exported) as we just use [u8; 32] directly
 +///
 +/// This is not exported to bindings users as we just use [u8; 32] directly
  #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
  pub struct InterceptId(pub [u8; 32]);
  
@@@ -580,7 -568,7 +580,7 @@@ struct PendingInboundPayment 
  /// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
  /// of [`KeysManager`] and [`DefaultRouter`].
  ///
 -/// (C-not exported) as Arcs don't make sense in bindings
 +/// This is not exported to bindings users as Arcs don't make sense in bindings
  pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
        Arc<M>,
        Arc<T>,
  /// or, respectively, [`Router`]  for its router, but this type alias chooses the concrete types
  /// of [`KeysManager`] and [`DefaultRouter`].
  ///
 -/// (C-not exported) as Arcs don't make sense in bindings
 +/// This is not exported to bindings users as Arcs don't make sense in bindings
  pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
  
  /// Manager which keeps track of a number of channels and sends messages to the appropriate
@@@ -1958,7 -1946,7 +1958,7 @@@ wher
        /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
 -      /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown
 +      /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
        pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
                self.close_channel_internal(channel_id, counterparty_node_id, None)
        }
        /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
 -      /// [`SendShutdown`]: crate::util::events::MessageSendEvent::SendShutdown
 +      /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
        pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
                self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
        }
                        let peer_state = &mut *peer_state_lock;
                        if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
                                if let Some(peer_msg) = peer_msg {
 -                                      self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
 +                                      self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) });
                                } else {
                                        self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
                                }
                payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>) -> Result<PendingHTLCInfo, ReceiveError>
        {
                // final_incorrect_cltv_expiry
 -              if hop_data.outgoing_cltv_value != cltv_expiry {
 +              if hop_data.outgoing_cltv_value > cltv_expiry {
                        return Err(ReceiveError {
 -                              msg: "Upstream node set CLTV to the wrong value",
 +                              msg: "Upstream node set CLTV to less than the CLTV set by the sender",
                                err_code: 18,
                                err_data: cltv_expiry.to_be_bytes().to_vec()
                        })
                        payment_hash,
                        incoming_shared_secret: shared_secret,
                        incoming_amt_msat: Some(amt_msat),
 -                      outgoing_amt_msat: amt_msat,
 +                      outgoing_amt_msat: hop_data.amt_to_forward,
                        outgoing_cltv_value: hop_data.outgoing_cltv_value,
                })
        }
        }
  
        #[cfg(test)]
 -      fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
 +      pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
        /// implemented by Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
        /// for more details.
        ///
 -      /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
 -      /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
 +      /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
 +      /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
        pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                forward_info: PendingHTLCInfo {
 -                                                                      routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, ..
 +                                                                      routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, ..
                                                                }
                                                        }) => {
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
                                                                                panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
                                                                        }
                                                                };
 -                                                              let claimable_htlc = ClaimableHTLC {
 +                                                              let mut claimable_htlc = ClaimableHTLC {
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                phantom_shared_secret,
                                                                        },
 -                                                                      value: outgoing_amt_msat,
 +                                                                      // We differentiate the received value from the sender intended value
 +                                                                      // if possible so that we don't prematurely mark MPP payments complete
 +                                                                      // if routing nodes overpay
 +                                                                      value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
 +                                                                      sender_intended_value: outgoing_amt_msat,
                                                                        timer_ticks: 0,
 +                                                                      total_value_received: None,
                                                                        total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
                                                                        cltv_expiry,
                                                                        onion_payload,
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                        continue
                                                                                }
 -                                                                              let (_, htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
 +                                                                              let (_, ref mut htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
                                                                                        .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                continue
                                                                                        }
                                                                                }
 -                                                                              let mut total_value = claimable_htlc.value;
 +                                                                              let mut total_value = claimable_htlc.sender_intended_value;
                                                                                for htlc in htlcs.iter() {
 -                                                                                      total_value += htlc.value;
 +                                                                                      total_value += htlc.sender_intended_value;
                                                                                        match &htlc.onion_payload {
                                                                                                OnionPayload::Invoice { .. } => {
                                                                                                        if htlc.total_msat != $payment_data.total_msat {
                                                                                                _ => unreachable!(),
                                                                                        }
                                                                                }
 -                                                                              if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
 -                                                                                      log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
 -                                                                                              log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
 +                                                                              // The condition determining whether an MPP is complete must
 +                                                                              // match exactly the condition used in `timer_tick_occurred`
 +                                                                              if total_value >= msgs::MAX_VALUE_MSAT {
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
 -                                                                              } else if total_value == $payment_data.total_msat {
 +                                                                              } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
 +                                                                                      log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
 +                                                                                              log_bytes!(payment_hash.0));
 +                                                                                      fail_htlc!(claimable_htlc, payment_hash);
 +                                                                              } else if total_value >= $payment_data.total_msat {
                                                                                        let prev_channel_id = prev_funding_outpoint.to_channel_id();
                                                                                        htlcs.push(claimable_htlc);
 +                                                                                      let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
 +                                                                                      htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
                                                                                        new_events.push(events::Event::PaymentClaimable {
                                                                                                receiver_node_id: Some(receiver_node_id),
                                                                                                payment_hash,
                                                                                                purpose: purpose(),
 -                                                                                              amount_msat: total_value,
 +                                                                                              amount_msat,
                                                                                                via_channel_id: Some(prev_channel_id),
                                                                                                via_user_channel_id: Some(prev_user_channel_id),
                                                                                        });
                                                                                                }
                                                                                                match claimable_payments.claimable_htlcs.entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
 +                                                                                                              let amount_msat = claimable_htlc.value;
 +                                                                                                              claimable_htlc.total_value_received = Some(amount_msat);
                                                                                                                let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
                                                                                                                e.insert((purpose.clone(), vec![claimable_htlc]));
                                                                                                                let prev_channel_id = prev_funding_outpoint.to_channel_id();
                                                                                                                new_events.push(events::Event::PaymentClaimable {
                                                                                                                        receiver_node_id: Some(receiver_node_id),
                                                                                                                        payment_hash,
 -                                                                                                                      amount_msat: outgoing_amt_msat,
 +                                                                                                                      amount_msat,
                                                                                                                        purpose,
                                                                                                                        via_channel_id: Some(prev_channel_id),
                                                                                                                        via_user_channel_id: Some(prev_user_channel_id),
                                if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
                                        // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
                                        // In this case we're not going to handle any timeouts of the parts here.
 -                                      if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
 +                                      // This condition determining whether the MPP is complete here must match
 +                                      // exactly the condition used in `process_pending_htlc_forwards`.
 +                                      if htlcs[0].total_msat <= htlcs.iter().fold(0, |total, htlc| total + htlc.sender_intended_value) {
                                                return true;
                                        } else if htlcs.into_iter().any(|htlc| {
                                                htlc.timer_ticks += 1;
        /// event matches your expectation. If you fail to do so and call this method, you may provide
        /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
        ///
 -      /// [`Event::PaymentClaimable`]: crate::util::events::Event::PaymentClaimable
 -      /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
 +      /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
 +      /// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
        /// [`process_pending_events`]: EventsProvider::process_pending_events
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
                // provide the preimage, so worrying too much about the optimal handling isn't worth
                // it.
                let mut claimable_amt_msat = 0;
 +              let mut prev_total_msat = None;
                let mut expected_amt_msat = None;
                let mut valid_mpp = true;
                let mut errs = Vec::new();
                                break;
                        }
  
 -                      if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
 -                              log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
 +                      if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
 +                              log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
 +                              debug_assert!(false);
 +                              valid_mpp = false;
 +                              break;
 +                      }
 +                      prev_total_msat = Some(htlc.total_msat);
 +
 +                      if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
 +                              log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
                                debug_assert!(false);
                                valid_mpp = false;
                                break;
                        }
 +                      expected_amt_msat = htlc.total_value_received;
  
 -                      expected_amt_msat = Some(htlc.total_msat);
                        if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
                                // We don't currently support MPP for spontaneous payments, so just check
                                // that there's one payment here and move on.
@@@ -6827,9 -6791,7 +6827,9 @@@ impl Writeable for ClaimableHTLC 
                        (0, self.prev_hop, required),
                        (1, self.total_msat, required),
                        (2, self.value, required),
 +                      (3, self.sender_intended_value, required),
                        (4, payment_data, option),
 +                      (5, self.total_value_received, option),
                        (6, self.cltv_expiry, required),
                        (8, keysend_preimage, option),
                });
@@@ -6841,19 -6803,15 +6841,19 @@@ impl Readable for ClaimableHTLC 
        fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
                let mut prev_hop = crate::util::ser::RequiredWrapper(None);
                let mut value = 0;
 +              let mut sender_intended_value = None;
                let mut payment_data: Option<msgs::FinalOnionHopData> = None;
                let mut cltv_expiry = 0;
 +              let mut total_value_received = None;
                let mut total_msat = None;
                let mut keysend_preimage: Option<PaymentPreimage> = None;
                read_tlv_fields!(reader, {
                        (0, prev_hop, required),
                        (1, total_msat, option),
                        (2, value, required),
 +                      (3, sender_intended_value, option),
                        (4, payment_data, option),
 +                      (5, total_value_received, option),
                        (6, cltv_expiry, required),
                        (8, keysend_preimage, option)
                });
                        prev_hop: prev_hop.0.unwrap(),
                        timer_ticks: 0,
                        value,
 +                      sender_intended_value: sender_intended_value.unwrap_or(value),
 +                      total_value_received,
                        total_msat: total_msat.unwrap(),
                        onion_payload,
                        cltv_expiry,
@@@ -7279,7 -7235,7 +7279,7 @@@ wher
        /// In such cases the latest local transactions will be sent to the tx_broadcaster included in
        /// this struct.
        ///
 -      /// (C-not exported) because we have no HashMap bindings
 +      /// This is not exported to bindings users because we have no HashMap bindings
        pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>,
  }
  
@@@ -7354,6 -7310,7 +7354,7 @@@ wher
                let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = Vec::new();
+               let mut pending_background_events = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
-                                       let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+                                       let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+                                       if let Some(monitor_update) = monitor_update {
+                                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+                                       }
                                        failed_htlcs.append(&mut new_failed_htlcs);
-                                       monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
                                        channel_closures.push(events::Event::ChannelClosed {
                                                channel_id: channel.channel_id(),
                                                user_channel_id: channel.get_user_id(),
                        }
                }
  
-               for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+               for (funding_txo, _) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
-                               log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
-                               monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+                               let monitor_update = ChannelMonitorUpdate {
+                                       update_id: CLOSED_CHANNEL_UPDATE_ID,
+                                       updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+                               };
+                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
                        }
                }
  
                }
  
                let background_event_count: u64 = Readable::read(reader)?;
-               let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
                for _ in 0..background_event_count {
                        match <u8 as Readable>::read(reader)? {
-                               0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+                               0 => {
+                                       let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+                                       if pending_background_events.iter().find(|e| {
+                                               let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+                                               *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+                                       }).is_none() {
+                                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+                                       }
+                               }
                                _ => return Err(DecodeError::InvalidValue),
                        }
                }
                        per_peer_state: FairRwLock::new(per_peer_state),
  
                        pending_events: Mutex::new(pending_events_read),
-                       pending_background_events: Mutex::new(pending_background_events_read),
+                       pending_background_events: Mutex::new(pending_background_events),
                        total_consistency_lock: RwLock::new(()),
                        persistence_notifier: Notifier::new(),
  
@@@ -7917,7 -7886,6 +7930,7 @@@ mod tests 
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
        use core::time::Duration;
        use core::sync::atomic::Ordering;
 +      use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
        use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::ChannelMessageHandler;
        use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
        use crate::util::errors::APIError;
 -      use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::util::test_utils;
        use crate::util::config::ChannelConfig;
        use crate::chain::keysinterface::EntropySource;
@@@ -8825,7 -8794,6 +8838,7 @@@ pub mod bench 
        use crate::chain::Listen;
        use crate::chain::chainmonitor::{ChainMonitor, Persist};
        use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
 +      use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
        use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::{ChannelMessageHandler, Init};
        use crate::routing::router::{PaymentParameters, get_route};
        use crate::util::test_utils;
        use crate::util::config::UserConfig;
 -      use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
  
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
index 15d0167ad46c6eabf6a3a9d0413a97538fe0df00,7ed996820053388a1de650282c828b6cafe8a52b..5bd2e87ba5c15ca3f1ef998bafc15b92e5c88c68
@@@ -16,9 -16,6 +16,9 @@@ use crate::chain::channelmonitor::LATEN
  use crate::chain::channelmonitor::{ANTI_REORG_DELAY, Balance};
  use crate::chain::transaction::OutPoint;
  use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 +#[cfg(anchors)]
 +use crate::events::bump_transaction::BumpTransactionEvent;
 +use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
  use crate::ln::channel;
  #[cfg(anchors)]
  use crate::ln::chan_utils;
@@@ -31,6 -28,9 +31,6 @@@ use crate::util::config::UserConfig
  #[cfg(anchors)]
  use crate::util::crypto::sign;
  #[cfg(anchors)]
 -use crate::util::events::BumpTransactionEvent;
 -use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 -#[cfg(anchors)]
  use crate::util::ser::Writeable;
  #[cfg(anchors)]
  use crate::util::test_utils;
@@@ -1775,7 -1775,7 +1775,7 @@@ fn test_yield_anchors_events() 
        let mut htlc_txs = Vec::with_capacity(2);
        for event in holder_events {
                match event {
 -                      Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, .. }) => {
 +                      Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, tx_lock_time, .. }) => {
                                assert_eq!(htlc_descriptors.len(), 1);
                                let htlc_descriptor = &htlc_descriptors[0];
                                let signer = nodes[0].keys_manager.derive_channel_keys(
                                let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp);
                                let mut htlc_tx = Transaction {
                                        version: 2,
 -                                      lock_time: if htlc_descriptor.htlc.offered {
 -                                              PackedLockTime(htlc_descriptor.htlc.cltv_expiry)
 -                                      } else {
 -                                              PackedLockTime::ZERO
 -                                      },
 +                                      lock_time: tx_lock_time,
                                        input: vec![
                                                htlc_descriptor.unsigned_tx_input(), // HTLC input
                                                TxIn { ..Default::default() } // Fee input
@@@ -1855,15 -1859,18 +1855,18 @@@ fn test_anchors_aggregated_revoked_htlc
        let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
        let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
  
+       // Serialize Bob with the initial state of both channels, which we'll use later.
+       let bob_serialized = nodes[1].node.encode();
        // Route two payments for each channel from Alice to Bob to lock in the HTLCs.
        let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
        let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
        let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
        let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
  
-       // Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
-       // point such that he broadcasts a revoked commitment transaction.
-       let bob_serialized = nodes[1].node.encode();
+       // Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
+       // at this point such that he broadcasts a revoked commitment transaction with the HTLCs
+       // present.
        let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
        let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
  
                }
        }
  
-       // Bob force closes by broadcasting his revoked state for each channel.
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
-       check_added_monitors(&nodes[1], 1);
-       check_closed_broadcast(&nodes[1], 1, true);
-       check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-       let revoked_commitment_a = {
-               let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-               assert_eq!(txn.len(), 1);
-               let revoked_commitment = txn.pop().unwrap();
-               assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-               check_spends!(revoked_commitment, chan_a.3);
-               revoked_commitment
-       };
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
-       check_added_monitors(&nodes[1], 1);
-       check_closed_broadcast(&nodes[1], 1, true);
-       check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-       let revoked_commitment_b = {
-               let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-               assert_eq!(txn.len(), 1);
-               let revoked_commitment = txn.pop().unwrap();
-               assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-               check_spends!(revoked_commitment, chan_b.3);
-               revoked_commitment
+       // Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
+       // broadcast the latest commitment transaction known to them, which in our case is the one with
+       // the HTLCs still pending.
+       nodes[1].node.timer_tick_occurred();
+       check_added_monitors(&nodes[1], 2);
+       check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
+       let (revoked_commitment_a, revoked_commitment_b) = {
+               let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+               assert_eq!(txn.len(), 2);
+               assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+               assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+               if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+                       check_spends!(&txn[0], &chan_a.3);
+                       check_spends!(&txn[1], &chan_b.3);
+                       (txn[0].clone(), txn[1].clone())
+               } else {
+                       check_spends!(&txn[1], &chan_a.3);
+                       check_spends!(&txn[0], &chan_b.3);
+                       (txn[1].clone(), txn[0].clone())
+               }
        };
  
        // Bob should now receive two events to bump his revoked commitment transaction fees.
                };
                let mut descriptors = Vec::with_capacity(4);
                for event in events {
 -                      if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { mut htlc_descriptors, .. }) = event {
 +                      if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { mut htlc_descriptors, tx_lock_time, .. }) = event {
                                assert_eq!(htlc_descriptors.len(), 2);
                                for htlc_descriptor in &htlc_descriptors {
                                        assert!(!htlc_descriptor.htlc.offered);
                                        htlc_tx.output.push(htlc_descriptor.tx_output(&per_commitment_point, &secp));
                                }
                                descriptors.append(&mut htlc_descriptors);
 +                              htlc_tx.lock_time = tx_lock_time;
                        } else {
                                panic!("Unexpected event");
                        }
index c0c037fdba4b7508d651b33fbc87a82bccbb5e26,8d5c4b54b7b33809016557e6e6508f1af1ad55bb..1ce0cc0345869dd9a392ebf78944c124d471aefc
@@@ -15,7 -15,6 +15,7 @@@ use crate::chain::{ChannelMonitorUpdate
  use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
  use crate::chain::keysinterface::EntropySource;
  use crate::chain::transaction::OutPoint;
 +use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
  use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
  use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
  use crate::ln::features::InvoiceFeatures;
@@@ -25,10 -24,10 +25,10 @@@ use crate::ln::outbound_payment::Retry
  use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
  use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
  use crate::routing::scoring::ChannelUsage;
 -use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
  use crate::util::test_utils;
  use crate::util::errors::APIError;
  use crate::util::ser::Writeable;
 +use crate::util::string::UntrustedString;
  
  use bitcoin::{Block, BlockHeader, TxMerkleNode};
  use bitcoin::hashes::Hash;
@@@ -335,9 -334,15 +335,15 @@@ fn do_retry_with_no_persist(confirm_bef
        check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[0].node.has_pending_payments());
-       let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(as_broadcasted_txn.len(), 1);
-       assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+       nodes[0].node.timer_tick_occurred();
+       if !confirm_before_reload {
+               let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+               assert_eq!(as_broadcasted_txn.len(), 1);
+               assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+       } else {
+               assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       }
+       check_added_monitors!(nodes[0], 1);
  
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
 -                      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
 +                      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
                        check_added_monitors!(nodes[1], 1);
                        assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
                },
@@@ -500,9 -505,11 +506,11 @@@ fn do_test_completed_payment_not_retrya
        // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
        // force-close the channel.
        check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+       nodes[0].node.timer_tick_occurred();
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[0].node.has_pending_payments());
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+       check_added_monitors!(nodes[0], 1);
  
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
 -                      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
 +                      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
                        check_added_monitors!(nodes[1], 1);
                        bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                },
@@@ -934,7 -941,7 +942,7 @@@ fn successful_probe_yields_event() 
        let mut events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events.drain(..).next().unwrap() {
 -              crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
 +              crate::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
                        assert_eq!(payment_id, ev_pid);
                        assert_eq!(payment_hash, ev_ph);
                },
@@@ -980,7 -987,7 +988,7 @@@ fn failed_probe_yields_event() 
        let mut events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events.drain(..).next().unwrap() {
 -              crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
 +              crate::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
                        assert_eq!(payment_id, ev_pid);
                        assert_eq!(payment_hash, ev_ph);
                },
@@@ -1414,7 -1421,7 +1422,7 @@@ fn do_test_intercepted_payment(test: In
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        let (intercept_id, expected_outbound_amount_msat) = match events[0] {
 -              crate::util::events::Event::HTLCIntercepted {
 +              crate::events::Event::HTLCIntercepted {
                        intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id
                } => {
                        assert_eq!(pmt_hash, payment_hash);
@@@ -2795,6 -2802,7 +2803,7 @@@ fn do_no_missing_sent_on_midpoint_reloa
        if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
        // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
        // the double-claim that would otherwise appear at the end of this test.
+       nodes[0].node.timer_tick_occurred();
        let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(as_broadcasted_txn.len(), 1);
  
index bdd327382510f89dd62e4565dd91e27a1485d274,70570c68b46e1243c927853bc57ce9616669f68e..ffb6ba0e85a1340da52bc88d28168e05f0f5dfe1
@@@ -14,16 -14,15 +14,16 @@@ use crate::chain::chaininterface::Lower
  use crate::chain::channelmonitor::ChannelMonitor;
  use crate::chain::keysinterface::EntropySource;
  use crate::chain::transaction::OutPoint;
 +use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
  use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId};
  use crate::ln::msgs;
  use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
  use crate::util::enforcing_trait_impls::EnforcingSigner;
  use crate::util::test_utils;
  use crate::util::errors::APIError;
 -use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
  use crate::util::ser::{Writeable, ReadableArgs};
  use crate::util::config::UserConfig;
 +use crate::util::string::UntrustedString;
  
  use bitcoin::hash_types::BlockHash;
  
@@@ -423,20 -422,22 +423,22 @@@ fn test_manager_serialize_deserialize_i
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
  
-       { // Channel close should result in a commitment tx
-               let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(txn.len(), 1);
-               check_spends!(txn[0], funding_tx);
-               assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
-       }
        for monitor in node_0_monitors.drain(..) {
                assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
                        ChannelMonitorUpdateStatus::Completed);
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
        check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+       { // Channel close should result in a commitment tx
+               nodes[0].node.timer_tick_occurred();
+               let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(txn.len(), 1);
+               check_spends!(txn[0], funding_tx);
+               assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+       }
+       check_added_monitors!(nodes[0], 1);
  
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@@ -567,7 -568,7 +569,7 @@@ fn do_test_data_loss_protect(reconnect_
        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
        assert!(nodes[1].node.list_usable_channels().is_empty());
        check_added_monitors!(nodes[1], 1);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
 +      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
        check_closed_broadcast!(nodes[1], false);
  }
  
@@@ -921,8 -922,10 +923,10 @@@ fn do_forwarded_payment_no_manager_pers
                });
        }
  
+       nodes[1].node.timer_tick_occurred();
        let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(bs_commitment_tx.len(), 1);
+       check_added_monitors!(nodes[1], 1);
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
index 1a159bf3244d03c857c265068fb99ca2351f4163,c22458830e3c59e461641c698aa502caf8d68077..6d89268cdc34357808178ec5a99d2330dcff61dd
  use crate::chain::channelmonitor::ANTI_REORG_DELAY;
  use crate::chain::transaction::OutPoint;
  use crate::chain::Confirm;
 +use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
  use crate::ln::channelmanager::ChannelManager;
  use crate::ln::msgs::{ChannelMessageHandler, Init};
 -use crate::util::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
  use crate::util::test_utils;
  use crate::util::ser::Writeable;
 +use crate::util::string::UntrustedString;
  
  use bitcoin::blockdata::block::{Block, BlockHeader};
  use bitcoin::blockdata::script::Builder;
@@@ -321,12 -320,7 +321,7 @@@ fn do_test_unconf_chan(reload_node: boo
                let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
  
                reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
-               if !reorg_after_reload {
-                       // If the channel is already closed when we reload the node, we'll broadcast a closing
-                       // transaction via the ChannelMonitor which is missing a corresponding channel.
-                       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
-                       nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-               }
+               assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
        }
  
        if reorg_after_reload {
        nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
        check_added_monitors!(nodes[0], 1);
        let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
 -      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Channel closed because of an exception: ".to_owned() + expected_err });
 +      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) });
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();