Merge pull request #2661 from TheBlueMatt/2023-10-dup-claim-chan-hang
author    Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
          Thu, 19 Oct 2023 17:53:46 +0000 (17:53 +0000)
committer GitHub <noreply@github.com>
          Thu, 19 Oct 2023 17:53:46 +0000 (17:53 +0000)
Immediately unblock channels on duplicate claims

lightning/src/ln/channelmanager.rs

index 113d690215599d87618b535ab6259bab89635a69,1a4bdfbf6eee8bfc9c34c38df1baeb86cde80c1d..9af65d96c3b088e931ecd029fcff6961fa27b5b1
@@@ -19,7 -19,7 +19,7 @@@
  
  use bitcoin::blockdata::block::BlockHeader;
  use bitcoin::blockdata::transaction::Transaction;
 -use bitcoin::blockdata::constants::{genesis_block, ChainHash};
 +use bitcoin::blockdata::constants::ChainHash;
  use bitcoin::network::constants::Network;
  
  use bitcoin::hashes::Hash;
@@@ -30,7 -30,6 +30,7 @@@ use bitcoin::secp256k1::{SecretKey,Publ
  use bitcoin::secp256k1::Secp256k1;
  use bitcoin::{LockTime, secp256k1, Sequence};
  
 +use crate::blinded_path::BlindedPath;
  use crate::chain;
  use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
  use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@@ -56,9 -55,6 +56,9 @@@ use crate::ln::msgs::{ChannelMessageHan
  use crate::ln::outbound_payment;
  use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
  use crate::ln::wire::Encode;
 +use crate::offers::offer::{DerivedMetadata, OfferBuilder};
 +use crate::offers::parse::Bolt12SemanticError;
 +use crate::offers::refund::RefundBuilder;
  use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
  use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
  use crate::util::wakers::{Future, Notifier};
@@@ -451,17 -447,16 +451,17 @@@ impl MsgHandleErrInternal 
        }
        #[inline]
        fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
 +              let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
 +              let action = if let (Some(_), ..) = &shutdown_res {
 +                      // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
 +                      // should disconnect our peer such that we force them to broadcast their latest
 +                      // commitment upon reconnecting.
 +                      msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
 +              } else {
 +                      msgs::ErrorAction::SendErrorMessage { msg: err_msg }
 +              };
                Self {
 -                      err: LightningError {
 -                              err: err.clone(),
 -                              action: msgs::ErrorAction::SendErrorMessage {
 -                                      msg: msgs::ErrorMessage {
 -                                              channel_id,
 -                                              data: err
 -                                      },
 -                              },
 -                      },
 +                      err: LightningError { err, action },
                        chan_id: Some((channel_id, user_channel_id)),
                        shutdown_finish: Some((shutdown_res, channel_update)),
                        channel_capacity: Some(channel_capacity)
@@@ -568,6 -563,7 +568,7 @@@ struct ClaimablePayments 
  /// usually because we're running pre-full-init. They are handled immediately once we detect we are
  /// running normally, and specifically must be processed before any other non-background
  /// [`ChannelMonitorUpdate`]s are applied.
+ #[derive(Debug)]
  enum BackgroundEvent {
        /// Handle a ChannelMonitorUpdate which closes the channel or for an already-closed channel.
        /// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as the
@@@ -620,10 -616,34 +621,34 @@@ pub(crate) enum MonitorUpdateCompletion
                event: events::Event,
                downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
        },
+       /// Indicates we should immediately resume the operation of another channel, unless there is
+       /// some other reason why the channel is blocked. In practice this simply means immediately
+       /// removing the [`RAAMonitorUpdateBlockingAction`] provided from the blocking set.
+       ///
+       /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
+       /// from completing a monitor update which removes the payment preimage until the inbound edge
+       /// completes a monitor update containing the payment preimage. However, we use this variant
+       /// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in
+       /// fact duplicative and we simply want to resume the outbound edge channel immediately.
+       ///
+       /// This variant should thus never be written to disk, as it is processed inline rather than
+       /// stored for later processing.
+       FreeOtherChannelImmediately {
+               downstream_counterparty_node_id: PublicKey,
+               downstream_funding_outpoint: OutPoint,
+               blocking_action: RAAMonitorUpdateBlockingAction,
+       },
  }
  
  impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
        (0, PaymentClaimed) => { (0, payment_hash, required) },
+       // Note that FreeOtherChannelImmediately should never be written - we were supposed to free
+       // *immediately*. However, for simplicity we implement read/write here.
+       (1, FreeOtherChannelImmediately) => {
+               (0, downstream_counterparty_node_id, required),
+               (2, downstream_funding_outpoint, required),
+               (4, blocking_action, required),
+       },
        (2, EmitEventAndFreeOtherChannel) => {
                (0, event, upgradable_required),
                // LDK prior to 0.0.116 did not have this field as the monitor update application order was
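A minimal sketch (not part of the diff) of how the new two-argument completion callback is expected to pick the new variant: the `definitely_duplicate` flag added to the `claim_funds_from_hop` callback later in this diff selects `FreeOtherChannelImmediately` instead of an event-emitting action. Identifiers such as `counterparty_node_id`, `next_channel_outpoint`, `completed_blocker`, and `payment_hash` are assumed to be in scope as they are in `claim_funds_internal`; the real forwarding path builds `EmitEventAndFreeOtherChannel` in the non-duplicate case.

    // Hypothetical callback passed to `claim_funds_from_hop`, simplified from the diff below.
    let completion_action = |_htlc_value_msat: Option<u64>, definitely_duplicate: bool| {
        if definitely_duplicate {
            // The claim was redundant: skip event generation and unblock the downstream
            // (outbound) channel immediately rather than storing an action for later.
            Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                downstream_counterparty_node_id: counterparty_node_id,
                downstream_funding_outpoint: next_channel_outpoint,
                blocking_action: completed_blocker,
            })
        } else {
            // A first (real) claim still uses an event-emitting completion action, e.g. the
            // receive-side `PaymentClaimed` shown in `claim_funds`.
            Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
        }
    };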
@@@ -1023,7 -1043,7 +1048,7 @@@ wher
        L::Target: Logger,
  {
        default_configuration: UserConfig,
 -      genesis_hash: BlockHash,
 +      chain_hash: ChainHash,
        fee_estimator: LowerBoundedFeeEstimator<F>,
        chain_monitor: M,
        tx_broadcaster: T,
@@@ -2017,7 -2037,7 +2042,7 @@@ macro_rules! emit_channel_ready_event 
  macro_rules! handle_monitor_update_completion {
        ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let mut updates = $chan.monitor_updating_restored(&$self.logger,
 -                      &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
 +                      &$self.node_signer, $self.chain_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
                let counterparty_node_id = $chan.context.get_counterparty_node_id();
                let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
@@@ -2263,7 -2283,7 +2288,7 @@@ wher
                let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
                ChannelManager {
                        default_configuration: config.clone(),
 -                      genesis_hash: genesis_block(params.network).header.block_hash(),
 +                      chain_hash: ChainHash::using_genesis_block(params.network),
                        fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
                        chain_monitor,
                        tx_broadcaster,
                        if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
                                outbound_scid_alias += 1;
                        } else {
 -                              outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 +                              outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        }
                        if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
                                break;
                                },
                        }
                };
 -              let res = channel.get_open_channel(self.genesis_hash.clone());
 +              let res = channel.get_open_channel(self.chain_hash);
  
                let temporary_channel_id = channel.context.channel_id();
                match peer_state.channel_by_id.entry(temporary_channel_id) {
                                        peer_state.pending_msg_events.push(
                                                events::MessageSendEvent::HandleError {
                                                        node_id: counterparty_node_id,
 -                                                      action: msgs::ErrorAction::SendErrorMessage {
 -                                                              msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
 +                                                      action: msgs::ErrorAction::DisconnectPeer {
 +                                                              msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
                                                        },
                                                }
                                        );
                // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
                // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
                let current_height: u32 = self.best_block.read().unwrap().height();
 -              if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
 +              if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
                        let mut err_data = Vec::with_capacity(12);
                        err_data.extend_from_slice(&amt_msat.to_be_bytes());
                        err_data.extend_from_slice(&current_height.to_be_bytes());
                                        // Note that this is likely a timing oracle for detecting whether an scid is a
                                        // phantom or an intercept.
                                        if (self.default_configuration.accept_intercept_htlcs &&
 -                                              fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
 -                                              fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
 +                                              fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
 +                                              fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
                                        {
                                                None
                                        } else {
                };
  
                let unsigned = msgs::UnsignedChannelUpdate {
 -                      chain_hash: self.genesis_hash,
 +                      chain_hash: self.chain_hash,
                        short_channel_id,
                        timestamp: chan.context.get_update_time_counter(),
                        flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
                                                                                }
                                                                                if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
                                                                                        let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
 -                                                                                      if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
 +                                                                                      if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
                                                                                                let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
                                                                                                let next_hop = match onion_utils::decode_next_payment_hop(
                                                                                                        phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
                                                        }
                                                }
                                        }
 -                                      let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
 -                                              Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
 +                                      let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
 +                                      let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
 +                                              Some((cp_id, chan_id)) => (cp_id, chan_id),
                                                None => {
                                                        forwarding_channel_not_found!();
                                                        continue;
        ///    with the current [`ChannelConfig`].
	///  * Removing peers which have disconnected and no longer have any channels.
        ///  * Force-closing and removing channels which have not completed establishment in a timely manner.
 +      ///  * Forgetting about stale outbound payments, either those that have already been fulfilled
 +      ///    or those awaiting an invoice that hasn't been delivered in the necessary amount of time.
 +      ///    The latter is determined using the system clock in `std` and the block time minus two
 +      ///    hours in `no-std`.
        ///
        /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
        /// estimate fetches.
                                self.finish_close_channel(shutdown_res);
                        }
  
 -                      self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
 +                      #[cfg(feature = "std")]
 +                      let duration_since_epoch = std::time::SystemTime::now()
 +                              .duration_since(std::time::SystemTime::UNIX_EPOCH)
 +                              .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
 +                      #[cfg(not(feature = "std"))]
 +                      let duration_since_epoch = Duration::from_secs(
 +                              self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
 +                      );
 +
 +                      self.pending_outbound_payments.remove_stale_payments(
 +                              duration_since_epoch, &self.pending_events
 +                      );
  
                        // Technically we don't need to do this here, but if we have holding cell entries in a
                        // channel that need freeing, it's better to do that here and block a background task
                        for htlc in sources.drain(..) {
                                if let Err((pk, err)) = self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
-                                       |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
-                               {
+                                       |_, definitely_duplicate| {
+                                               debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
+                                               Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
+                                       }
+                               ) {
                                        if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                // We got a temporary failure updating monitor, but will claim the
                                                // HTLC when the monitor updating is restored (or on chain).
                }
        }
  
-       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
+       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
                prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
        -> Result<(), (PublicKey, MsgHandleErrInternal)> {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
                // `BackgroundEvent`s.
                let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
  
+               // As we may call handle_monitor_update_completion_actions in rather rare cases, check that
+               // the required mutexes are not held before we start.
+               debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+               debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let chan_id = prev_hop.outpoint.to_channel_id();
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
  
-                                               if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
-                                                       if let Some(action) = completion_action(Some(htlc_value_msat)) {
-                                                               log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
-                                                                       chan_id, action);
-                                                               peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+                                               match fulfill_res {
+                                                       UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
+                                                               if let Some(action) = completion_action(Some(htlc_value_msat), false) {
+                                                                       log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                                                                               chan_id, action);
+                                                                       peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+                                                               }
+                                                               if !during_init {
+                                                                       handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+                                                                               peer_state, per_peer_state, chan);
+                                                               } else {
+                                                                       // If we're running during init we cannot update a monitor directly -
+                                                                       // they probably haven't actually been loaded yet. Instead, push the
+                                                                       // monitor update as a background event.
+                                                                       self.pending_background_events.lock().unwrap().push(
+                                                                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                                                       counterparty_node_id,
+                                                                                       funding_txo: prev_hop.outpoint,
+                                                                                       update: monitor_update.clone(),
+                                                                               });
+                                                               }
                                                        }
-                                                       if !during_init {
-                                                               handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-                                                                       peer_state, per_peer_state, chan);
-                                                       } else {
-                                                               // If we're running during init we cannot update a monitor directly -
-                                                               // they probably haven't actually been loaded yet. Instead, push the
-                                                               // monitor update as a background event.
-                                                               self.pending_background_events.lock().unwrap().push(
-                                                                       BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                                                               counterparty_node_id,
-                                                                               funding_txo: prev_hop.outpoint,
-                                                                               update: monitor_update.clone(),
-                                                                       });
+                                                       UpdateFulfillCommitFetch::DuplicateClaim {} => {
+                                                               let action = if let Some(action) = completion_action(None, true) {
+                                                                       action
+                                                               } else {
+                                                                       return Ok(());
+                                                               };
+                                                               mem::drop(peer_state_lock);
+                                                               log_trace!(self.logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
+                                                                       chan_id, action);
+                                                               let (node_id, funding_outpoint, blocker) =
+                                                               if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
+                                                                       downstream_counterparty_node_id: node_id,
+                                                                       downstream_funding_outpoint: funding_outpoint,
+                                                                       blocking_action: blocker,
+                                                               } = action {
+                                                                       (node_id, funding_outpoint, blocker)
+                                                               } else {
+                                                                       debug_assert!(false,
+                                                                               "Duplicate claims should always free another channel immediately");
+                                                                       return Ok(());
+                                                               };
+                                                               if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
+                                                                       let mut peer_state = peer_state_mtx.lock().unwrap();
+                                                                       if let Some(blockers) = peer_state
+                                                                               .actions_blocking_raa_monitor_updates
+                                                                               .get_mut(&funding_outpoint.to_channel_id())
+                                                                       {
+                                                                               let mut found_blocker = false;
+                                                                               blockers.retain(|iter| {
+                                                                                       // Note that we could actually be blocked, in
+                                                                                       // which case we need to only remove the one
+                                                                                       // blocker which was added duplicatively.
+                                                                                       let first_blocker = !found_blocker;
+                                                                                       if *iter == blocker { found_blocker = true; }
+                                                                                       *iter != blocker || !first_blocker
+                                                                               });
+                                                                               debug_assert!(found_blocker);
+                                                                       }
+                                                               } else {
+                                                                       debug_assert!(false);
+                                                               }
                                                        }
                                                }
                                        }
                // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
                // generally always allowed to be duplicative (and it's specifically noted in
                // `PaymentForwarded`).
-               self.handle_monitor_update_completion_actions(completion_action(None));
+               self.handle_monitor_update_completion_actions(completion_action(None, false));
                Ok(())
        }
  
        }
  
        fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
-               forwarded_htlc_value_msat: Option<u64>, from_onchain: bool,
+               forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
                next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
        ) {
                match source {
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_outpoint = hop_data.outpoint;
                                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
+                               #[cfg(debug_assertions)]
+                               let claiming_chan_funding_outpoint = hop_data.outpoint;
                                let res = self.claim_funds_from_hop(hop_data, payment_preimage,
-                                       |htlc_claim_value_msat| {
-                                               if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
-                                                       let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
-                                                               Some(claimed_htlc_value - forwarded_htlc_value)
-                                                       } else { None };
+                                       |htlc_claim_value_msat, definitely_duplicate| {
+                                               let chan_to_release =
+                                                       if let Some(node_id) = next_channel_counterparty_node_id {
+                                                               Some((node_id, next_channel_outpoint, completed_blocker))
+                                                       } else {
+                                                               // We can only get `None` here if we are processing a
+                                                               // `ChannelMonitor`-originated event, in which case we
+                                                               // don't care about ensuring we wake the downstream
+                                                               // channel's monitor updating - the channel is already
+                                                               // closed.
+                                                               None
+                                                       };
  
+                                               if definitely_duplicate && startup_replay {
+                                                       // On startup we may get redundant claims which are related to
+                                                       // monitor updates still in flight. In that case, we shouldn't
+                                                       // immediately free, but instead let that monitor update complete
+                                                       // in the background.
+                                                       #[cfg(debug_assertions)] {
+                                                               let background_events = self.pending_background_events.lock().unwrap();
+                                                               // There should be a `BackgroundEvent` pending...
+                                                               assert!(background_events.iter().any(|ev| {
+                                                                       match ev {
+                                                                               // to apply a monitor update that blocked the claiming channel,
+                                                                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                                                       funding_txo, update, ..
+                                                                               } => {
+                                                                                       if *funding_txo == claiming_chan_funding_outpoint {
+                                                                                               assert!(update.updates.iter().any(|upd|
+                                                                                                       if let ChannelMonitorUpdateStep::PaymentPreimage {
+                                                                                                               payment_preimage: update_preimage
+                                                                                                       } = upd {
+                                                                                                               payment_preimage == *update_preimage
+                                                                                                       } else { false }
+                                                                                               ), "{:?}", update);
+                                                                                               true
+                                                                                       } else { false }
+                                                                               },
+                                                                               // or the channel we'd unblock is already closed,
+                                                                               BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
+                                                                                       (funding_txo, monitor_update)
+                                                                               ) => {
+                                                                                       if *funding_txo == next_channel_outpoint {
+                                                                                               assert_eq!(monitor_update.updates.len(), 1);
+                                                                                               assert!(matches!(
+                                                                                                       monitor_update.updates[0],
+                                                                                                       ChannelMonitorUpdateStep::ChannelForceClosed { .. }
+                                                                                               ));
+                                                                                               true
+                                                                                       } else { false }
+                                                                               },
+                                                                               // or the monitor update has completed and will unblock
+                                                                               // immediately once we get going.
+                                                                               BackgroundEvent::MonitorUpdatesComplete {
+                                                                                       channel_id, ..
+                                                                               } =>
+                                                                                       *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+                                                                       }
+                                                               }), "{:?}", *background_events);
+                                                       }
+                                                       None
+                                               } else if definitely_duplicate {
+                                                       if let Some(other_chan) = chan_to_release {
+                                                               Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
+                                                                       downstream_counterparty_node_id: other_chan.0,
+                                                                       downstream_funding_outpoint: other_chan.1,
+                                                                       blocking_action: other_chan.2,
+                                                               })
+                                                       } else { None }
+                                               } else {
+                                                       let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                                               if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+                                                                       Some(claimed_htlc_value - forwarded_htlc_value)
+                                                               } else { None }
+                                                       } else { None };
                                                        Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                                event: events::Event::PaymentForwarded {
                                                                        fee_earned_msat,
                                                                        next_channel_id: Some(next_channel_outpoint.to_channel_id()),
                                                                        outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
                                                                },
-                                                               downstream_counterparty_and_funding_outpoint:
-                                                                       if let Some(node_id) = next_channel_counterparty_node_id {
-                                                                               Some((node_id, next_channel_outpoint, completed_blocker))
-                                                                       } else {
-                                                                               // We can only get `None` here if we are processing a
-                                                                               // `ChannelMonitor`-originated event, in which case we
-                                                                               // don't care about ensuring we wake the downstream
-                                                                               // channel's monitor updating - the channel is already
-                                                                               // closed.
-                                                                               None
-                                                                       },
+                                                               downstream_counterparty_and_funding_outpoint: chan_to_release,
                                                        })
-                                               } else { None }
+                                               }
                                        });
                                if let Err((pk, err)) = res {
                                        let result: Result<(), _> = Err(err);
        }
  
        fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
+               debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+               debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
+               debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
                for action in actions.into_iter() {
                        match action {
                                MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
                                                self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
                                        }
                                },
+                               MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
+                                       downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+                               } => {
+                                       self.handle_monitor_update_release(
+                                               downstream_counterparty_node_id,
+                                               downstream_funding_outpoint,
+                                               Some(blocking_action),
+                                       );
+                               },
                        }
                }
        }
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
                // likely to be lost on restart!
 -              if msg.chain_hash != self.genesis_hash {
 +              if msg.chain_hash != self.chain_hash {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
  
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
 -                                              self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
 +                                              self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
                                        if let Some(announcement_sigs) = announcement_sigs_opt {
                                                log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
                                                if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
+                                                       log_trace!(self.logger,
+                                                               "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
+                                                               msg.channel_id);
                                                        peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
                                                                .or_insert_with(Vec::new)
                                                                .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
+               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
                Ok(())
        }
  
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
 -                                                         fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash)
 +                                                         fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
                                                        {
                                                                let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
                                                                let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
  
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                msg: try_chan_phase_entry!(self, chan.announcement_signatures(
 -                                                      &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
 +                                                      &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
                                                        msg, &self.default_configuration
                                                ), chan_phase_entry),
                                                // Note that announcement_signatures fails if the channel cannot be announced,
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                        debug_assert!(false);
 -                                      MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
 +                                      MsgHandleErrInternal::send_err_msg_no_close(
 +                                              format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
 +                                              msg.channel_id
 +                                      )
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                                                // freed HTLCs to fail backwards. If in the future we no longer drop pending
                                                // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                                let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
 -                                                      msg, &self.logger, &self.node_signer, self.genesis_hash,
 +                                                      msg, &self.logger, &self.node_signer, self.chain_hash,
                                                        &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
                                                let mut channel_update = None;
                                                if let Some(msg) = responses.shutdown_msg {
                                                        "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
                                        }
                                },
 -                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 +                              hash_map::Entry::Vacant(_) => {
 +                                      log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
 +                                              log_bytes!(msg.channel_id.0));
 +                                      // Unfortunately, lnd doesn't force close on errors
 +                                      // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
 +                                      // One of the few ways to get an lnd counterparty to force close is by
 +                                      // replicating what they do when restoring static channel backups (SCBs). They
 +                                      // send an invalid `ChannelReestablish` with `0` commitment numbers and an
 +                                      // invalid `your_last_per_commitment_secret`.
 +                                      //
 +                                      // Since we received a `ChannelReestablish` for a channel that doesn't exist, we
 +                                      // can assume it's likely the channel closed from our point of view, but it
 +                                      // remains open on the counterparty's side. By sending this bogus
 +                                      // `ChannelReestablish` message now as a response to theirs, we trigger them to
 +                                      // force close broadcasting their latest state. If the closing transaction from
 +                                      // our point of view remains unconfirmed, it'll enter a race with the
 +                                      // counterparty's to-be-broadcast latest commitment transaction.
 +                                      peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
 +                                              node_id: *counterparty_node_id,
 +                                              msg: msgs::ChannelReestablish {
 +                                                      channel_id: msg.channel_id,
 +                                                      next_local_commitment_number: 0,
 +                                                      next_remote_commitment_number: 0,
 +                                                      your_last_per_commitment_secret: [1u8; 32],
 +                                                      my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
 +                                                      next_funding_txid: None,
 +                                              },
 +                                      });
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                                              format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
 +                                                      counterparty_node_id), msg.channel_id)
 +                                      )
 +                              }
                        }
                };
  
                                        MonitorEvent::HTLCEvent(htlc_update) => {
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
-                                                       self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
+                                                       self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
                                                        let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
                                                                                self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
 -                                                                                      action: msgs::ErrorAction::SendErrorMessage {
 -                                                                                              msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
 +                                                                                      action: msgs::ErrorAction::DisconnectPeer {
 +                                                                                              msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
                                                                                        },
                                                                                });
                                                                        }
                }
        }
  
 +      /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
 +      /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
 +      /// not have an expiration unless otherwise set on the builder.
 +      ///
 +      /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
 +      /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
 +      /// the node must be announced. Otherwise, there is no way to find a path to the introduction
 +      /// node in order to send the [`InvoiceRequest`].
 +      ///
 +      /// [`Offer`]: crate::offers::offer::Offer
 +      /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
 +      pub fn create_offer_builder(
 +              &self, description: String
 +      ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
 +              let node_id = self.get_our_node_id();
 +              let expanded_key = &self.inbound_payment_key;
 +              let entropy = &*self.entropy_source;
 +              let secp_ctx = &self.secp_ctx;
 +              let path = self.create_one_hop_blinded_path();
 +
 +              OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
 +                      .chain_hash(self.chain_hash)
 +                      .path(path)
 +      }
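A hedged caller-side sketch (not part of the diff) of the new offer API; `channel_manager` is assumed to be a fully initialized `ChannelManager`, and `amount_msats` is one of the standard `OfferBuilder` setters.

    // Hypothetical usage: advertise an offer this ChannelManager will recognize when it
    // later handles an invoice_request for it.
    let offer = channel_manager
        .create_offer_builder("coffee".to_string())
        .amount_msats(10_000)
        .build()
        .expect("valid offer parameters");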
 +
 +      /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
 +      /// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund. The builder will
 +      /// have the provided expiration set. Any changes to the expiration on the returned builder will
 +      /// not be honored by [`ChannelManager`].
 +      ///
 +      /// The provided `payment_id` is used to ensure that only one invoice is paid for the refund.
 +      ///
 +      /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
 +      /// the introduction node and a derived payer id for sender privacy. As such, currently, the
 +      /// node must be announced. Otherwise, there is no way to find a path to the introduction node
 +      /// in order to send the [`Bolt12Invoice`].
 +      ///
 +      /// [`Refund`]: crate::offers::refund::Refund
 +      /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
 +      pub fn create_refund_builder(
 +              &self, description: String, amount_msats: u64, absolute_expiry: Duration,
 +              payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
 +      ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
 +              let node_id = self.get_our_node_id();
 +              let expanded_key = &self.inbound_payment_key;
 +              let entropy = &*self.entropy_source;
 +              let secp_ctx = &self.secp_ctx;
 +              let path = self.create_one_hop_blinded_path();
 +
 +              let builder = RefundBuilder::deriving_payer_id(
 +                      description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
 +              )?
 +                      .chain_hash(self.chain_hash)
 +                      .absolute_expiry(absolute_expiry)
 +                      .path(path);
 +
 +              self.pending_outbound_payments
 +                      .add_new_awaiting_invoice(
 +                              payment_id, absolute_expiry, retry_strategy, max_total_routing_fee_msat,
 +                      )
 +                      .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
 +
 +              Ok(builder)
 +      }
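// A minimal usage sketch for the refund builder above, assuming a `channel_manager` handle
// and illustrative values; `PaymentId` and `Retry` come from this module, and the trailing
// `?` assumes the caller returns a `Result<_, Bolt12SemanticError>`.

let payment_id = PaymentId([42; 32]); // illustrative only; normally derived from fresh entropy
let absolute_expiry = Duration::from_secs(1_700_000_000); // unix time at which the refund expires
let refund = channel_manager
    .create_refund_builder(
        "refund for order 42".to_string(), 10_000, absolute_expiry, payment_id,
        Retry::Attempts(3), None,
    )?
    .build()?;
// `refund` is then delivered to the refunding node, which replies with a `Bolt12Invoice`.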
 +
        /// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
        /// to pay us.
        ///
                inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
        }
  
 +      /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
 +      /// node.
 +      fn create_one_hop_blinded_path(&self) -> BlindedPath {
 +              let entropy_source = self.entropy_source.deref();
 +              let secp_ctx = &self.secp_ctx;
 +              BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
 +      }
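// A sketch of the same construction with explicit error handling instead of `unwrap`,
// assuming `node_id`, an `EntropySource` named `entropy`, and a `Secp256k1` context are in
// scope; mapping the failure to `Bolt12SemanticError::MissingPaths` is only one possibility.

let path = BlindedPath::one_hop_for_message(node_id, &entropy, &secp_ctx)
    .map_err(|_| Bolt12SemanticError::MissingPaths)?;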
 +
        /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
        /// are used when constructing the phantom invoice's route hints.
        ///
                let best_block_height = self.best_block.read().unwrap().height();
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
 -                      let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 +                      let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        // Ensure the generated scid doesn't conflict with a real channel.
                        match short_to_chan_info.get(&scid_candidate) {
                                Some(_) => continue,
                let best_block_height = self.best_block.read().unwrap().height();
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
 -                      let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 +                      let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        // Ensure the generated scid doesn't conflict with a real channel.
                        if short_to_chan_info.contains_key(&scid_candidate) { continue }
                        return scid_candidate
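// A sketch of how a fake scid from the phantom namespace might be used when building an
// invoice route hint, assuming a `channel_manager` handle and placeholder fee/CLTV values;
// the surrounding invoice-construction code is omitted.

use lightning::routing::gossip::RoutingFees;
use lightning::routing::router::{RouteHint, RouteHintHop};

let phantom_scid = channel_manager.get_phantom_scid();
let phantom_hint = RouteHint(vec![RouteHintHop {
    src_node_id: channel_manager.get_our_node_id(),
    short_channel_id: phantom_scid,
    fees: RoutingFees { base_msat: 0, proportional_millionths: 0 },
    cltv_expiry_delta: 144,
    htlc_minimum_msat: None,
    htlc_maximum_msat: None,
}]);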
@@@ -7601,7 -7627,7 +7756,7 @@@ wher
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
  
 -              self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 +              self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
        }
  }
  
@@@ -7627,13 -7653,13 +7782,13 @@@ wher
                let _persistence_guard =
                        PersistenceNotifierGuard::optionally_notify_skipping_background_events(
                                self, || -> NotifyOption { NotifyOption::DoPersist });
 -              self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
 +              self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
  
                let last_best_block_height = self.best_block.read().unwrap().height();
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
 -                      self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 +                      self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
                }
        }
  
                                self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
  
 -              self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 +              self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
  
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@@ -7770,7 -7796,7 +7925,7 @@@ wher
                                                                                msg: announcement_sigs,
                                                                        });
                                                                        if let Some(height) = height_opt {
 -                                                                              if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
 +                                                                              if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.chain_hash, height, &self.default_configuration) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                                                                msg: announcement,
                                                                                                // Note that announcement_signatures fails if the channel cannot be announced,
                                                                self.issue_channel_close_events(&channel.context, reason);
                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                        node_id: channel.context.get_counterparty_node_id(),
 -                                                                      action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
 -                                                                              channel_id: channel.context.channel_id(),
 -                                                                              data: reason_message,
 -                                                                      } },
 +                                                                      action: msgs::ErrorAction::DisconnectPeer {
 +                                                                              msg: Some(msgs::ErrorMessage {
 +                                                                                      channel_id: channel.context.channel_id(),
 +                                                                                      data: reason_message,
 +                                                                              })
 +                                                                      },
                                                                });
                                                                return false;
                                                        }
@@@ -8382,7 -8406,7 +8537,7 @@@ wher
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
 -                                      if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
 +                                      if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                                                        node_id: *counterparty_node_id,
                                                        msg,
                provided_init_features(&self.default_configuration)
        }
  
 -      fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
 -              Some(vec![ChainHash::from(&self.genesis_hash[..])])
 +      fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
 +              Some(vec![self.chain_hash])
        }
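// A minimal sketch of how the value returned above is typically produced and checked,
// assuming the `bitcoin` crate's `ChainHash` constructor and that the
// `ChannelMessageHandler` trait is in scope so `get_chain_hashes` can be called.

use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;

let expected = ChainHash::using_genesis_block(Network::Bitcoin);
if let Some(hashes) = channel_manager.get_chain_hashes() {
    // Peers that do not share a chain hash with us should be rejected during init.
    assert!(hashes.contains(&expected));
}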
  
        fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
@@@ -8951,7 -8975,7 +9106,7 @@@ wher
  
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
  
 -              self.genesis_hash.write(writer)?;
 +              self.chain_hash.write(writer)?;
                {
                        let best_block = self.best_block.read().unwrap();
                        best_block.height().write(writer)?;
@@@ -9362,7 -9386,7 +9517,7 @@@ wher
        fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
  
 -              let genesis_hash: BlockHash = Readable::read(reader)?;
 +              let chain_hash: ChainHash = Readable::read(reader)?;
                let best_block_height: u32 = Readable::read(reader)?;
                let best_block_hash: BlockHash = Readable::read(reader)?;
  
                                                let mut outbound_scid_alias;
                                                loop {
                                                        outbound_scid_alias = fake_scid::Namespace::OutboundAlias
 -                                                              .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
 +                                                              .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
                                                        if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
                                                }
                                                chan.context.set_outbound_scid_alias(outbound_scid_alias);
                                                                Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
                                                } = action {
                                                        if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+                                                               log_trace!(args.logger,
+                                                                       "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
+                                                                       blocked_channel_outpoint.to_channel_id());
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
                                                                        .entry(blocked_channel_outpoint.to_channel_id())
                                                                        .or_insert_with(Vec::new).push(blocking_action.clone());
                                                                // anymore.
                                                        }
                                                }
+                                               if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action {
+                                                       debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
+                                               }
                                        }
                                }
                                peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
                }
  
                let channel_manager = ChannelManager {
 -                      genesis_hash,
 +                      chain_hash,
                        fee_estimator: bounded_fee_estimator,
                        chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
                        // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
                        // channel is closed we just assume that it probably came from an on-chain claim.
                        channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
-                               downstream_closed, downstream_node_id, downstream_funding);
+                               downstream_closed, true, downstream_node_id, downstream_funding);
                }
  
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@@ -11227,30 -11257,6 +11388,30 @@@ mod tests 
                        sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok());
        }
  
 +      #[test]
 +      fn test_final_incorrect_cltv() {
 +              let chanmon_cfg = create_chanmon_cfgs(1);
 +              let node_cfg = create_node_cfgs(1, &chanmon_cfg);
 +              let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
 +              let node = create_network(1, &node_cfg, &node_chanmgr);
 +
 +              let result = node[0].node.construct_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
 +                      amt_msat: 100,
 +                      outgoing_cltv_value: 22,
 +                      payment_metadata: None,
 +                      keysend_preimage: None,
 +                      payment_data: Some(msgs::FinalOnionHopData {
 +                              payment_secret: PaymentSecret([0; 32]), total_msat: 100,
 +                      }),
 +                      custom_tlvs: Vec::new(),
 +              }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None);
 +
 +              // Should not return an error, since the failure condition from BOLT 4 linked below
 +              // is not satisfied:
 +              // https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
 +              assert!(result.is_ok());
 +      }
 +
        #[test]
        fn test_inbound_anchors_manual_acceptance() {
                // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
                let payment_preimage = PaymentPreimage([42; 32]);
                assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
        }
 +
 +      #[test]
 +      fn test_trigger_lnd_force_close() {
 +              let chanmon_cfg = create_chanmon_cfgs(2);
 +              let node_cfg = create_node_cfgs(2, &chanmon_cfg);
 +              let user_config = test_default_channel_config();
 +              let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
 +              let nodes = create_network(2, &node_cfg, &node_chanmgr);
 +
 +              // Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
 +              let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
 +              nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 +              nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 +              nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
 +              check_closed_broadcast(&nodes[0], 1, true);
 +              check_added_monitors(&nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 +              {
 +                      let txn = nodes[0].tx_broadcaster.txn_broadcast();
 +                      assert_eq!(txn.len(), 1);
 +                      check_spends!(txn[0], funding_tx);
 +              }
 +
 +              // Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
 +              // such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
 +              // their side.
 +              nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
 +                      features: nodes[1].node.init_features(), networks: None, remote_network_address: None
 +              }, true).unwrap();
 +              nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
 +                      features: nodes[0].node.init_features(), networks: None, remote_network_address: None
 +              }, false).unwrap();
 +              assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 +              let channel_reestablish = get_event_msg!(
 +                      nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
 +              );
 +              nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &channel_reestablish);
 +
 +              // Alice should respond with an error since the channel isn't known, but a bogus
 +              // `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
 +              // close even if it was an lnd node.
 +              let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(msg_events.len(), 2);
 +              if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
 +                      assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 +                      assert_eq!(msg.next_local_commitment_number, 0);
 +                      assert_eq!(msg.next_remote_commitment_number, 0);
 +                      nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &msg);
 +              } else { panic!() };
 +              check_closed_broadcast(&nodes[1], 1, true);
 +              check_added_monitors(&nodes[1], 1);
 +              let expected_close_reason = ClosureReason::ProcessingError {
 +                      err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
 +              };
 +              check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
 +              {
 +                      let txn = nodes[1].tx_broadcaster.txn_broadcast();
 +                      assert_eq!(txn.len(), 1);
 +                      check_spends!(txn[0], funding_tx);
 +              }
 +      }
  }
  
  #[cfg(ldk_bench)]