Merge pull request #2658 from wpaulino/bogus-channel-reestablish
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 18 Oct 2023 21:47:31 +0000 (21:47 +0000)
committer GitHub <noreply@github.com>
Wed, 18 Oct 2023 21:47:31 +0000 (21:47 +0000)
Send bogus ChannelReestablish for unknown channels
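
When a peer sends a channel_reestablish for a channel we don't recognize, we now reply with a deliberately invalid channel_reestablish (zero commitment numbers and a bogus your_last_per_commitment_secret) in addition to the usual error. As the new comment in channelmanager.rs explains, lnd does not force-close on error messages, but it does force-close on a message of this shape, mirroring its own static channel backup (SCB) recovery flow, so this prompts the counterparty to broadcast its latest commitment transaction. Related force-close errors are also switched from ErrorAction::SendErrorMessage to ErrorAction::DisconnectPeer, and ChannelManager's genesis_hash field becomes a ChainHash named chain_hash.

A minimal standalone sketch of the bogus message, with field values copied from the channelmanager.rs hunk below (imports from the bitcoin and lightning crates are assumed; the helper name is illustrative, not part of this change):

    use bitcoin::secp256k1::PublicKey;
    use lightning::ln::{msgs, ChannelId};

    fn bogus_channel_reestablish(channel_id: ChannelId) -> msgs::ChannelReestablish {
        msgs::ChannelReestablish {
            channel_id,
            // Zero commitment numbers and an invalid per-commitment secret: the same
            // shape lnd sends when restoring from an SCB, which gets the counterparty
            // to broadcast its latest commitment transaction.
            next_local_commitment_number: 0,
            next_remote_commitment_number: 0,
            your_last_per_commitment_secret: [1u8; 32],
            my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
            next_funding_txid: None,
        }
    }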

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/payment_tests.rs

index ef5bf6895a81e2fe2d5e33d47297b579dee15b97,2359d1ef0e3723bdf1ea28c62a2720fd6fd47bf2..b9a40df91a0117ceb2ff6c9f0f80b16d6fe109c6
@@@ -7,7 -7,6 +7,7 @@@
  // You may not use this file except in accordance with one or both of these
  // licenses.
  
 +use bitcoin::blockdata::constants::ChainHash;
  use bitcoin::blockdata::script::{Script,Builder};
  use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
  use bitcoin::util::sighash;
@@@ -2662,7 -2661,7 +2662,7 @@@ impl<SP: Deref> Channel<SP> wher
        /// and the channel is now usable (and public), this may generate an announcement_signatures to
        /// reply with.
        pub fn channel_ready<NS: Deref, L: Deref>(
 -              &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
 +              &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
                user_config: &UserConfig, best_block: &BestBlock, logger: &L
        ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
        where
  
                log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
  
 -              Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
 +              Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
        }
  
        pub fn update_add_htlc<F, FE: Deref, L: Deref>(
        /// successfully and we should restore normal operation. Returns messages which should be sent
        /// to the remote side.
        pub fn monitor_updating_restored<L: Deref, NS: Deref>(
 -              &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
 +              &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
                user_config: &UserConfig, best_block_height: u32
        ) -> MonitorRestoreUpdates
        where
                        })
                } else { None };
  
 -              let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
 +              let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
  
                let mut accepted_htlcs = Vec::new();
                mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
        /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
        pub fn channel_reestablish<L: Deref, NS: Deref>(
                &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
 -              genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
 +              chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
        ) -> Result<ReestablishResponses, ChannelError>
        where
                L::Target: Logger,
  
                if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
                        msg.next_local_commitment_number == 0 {
-                       return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
+                       return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
                }
  
                if msg.next_remote_commitment_number > 0 {
  
                let shutdown_msg = self.get_outbound_shutdown();
  
 -              let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
 +              let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
  
                if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
                        // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
        /// In the second, we simply return an Err indicating we need to be force-closed now.
        pub fn transactions_confirmed<NS: Deref, L: Deref>(
                &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
 -              genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
 +              chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
        ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
        where
                NS::Target: NodeSigner,
                                        // may have already happened for this block).
                                        if let Some(channel_ready) = self.check_get_channel_ready(height) {
                                                log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
 -                                              let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
 +                                              let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
                                                msgs = (Some(channel_ready), announcement_sigs);
                                        }
                                }
        /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
        /// back.
        pub fn best_block_updated<NS: Deref, L: Deref>(
 -              &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
 +              &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
                node_signer: &NS, user_config: &UserConfig, logger: &L
        ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
        where
                NS::Target: NodeSigner,
                L::Target: Logger
        {
 -              self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
 +              self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
        }
  
        fn do_best_block_updated<NS: Deref, L: Deref>(
                &mut self, height: u32, highest_header_time: u32,
 -              genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
 +              chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
        ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
        where
                NS::Target: NodeSigner,
                self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
  
                if let Some(channel_ready) = self.check_get_channel_ready(height) {
 -                      let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
 -                              self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
 +                      let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
 +                              self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
                        } else { None };
                        log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                        return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                        return Err(ClosureReason::FundingTimedOut);
                }
  
 -              let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
 -                      self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
 +              let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
 +                      self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
                } else { None };
                Ok((None, timed_out_htlcs, announcement_sigs))
        }
                        // larger. If we don't know that time has moved forward, we can just set it to the last
                        // time we saw and it will be ignored.
                        let best_time = self.context.update_time_counter;
 -                      match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
 +                      match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
                                Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
                                        assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
        ///
        /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
        fn get_channel_announcement<NS: Deref>(
 -              &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
 +              &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
        ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
                if !self.context.config.announced_channel {
                        return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
        }
  
        fn get_announcement_sigs<NS: Deref, L: Deref>(
 -              &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
 +              &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
                best_block_height: u32, logger: &L
        ) -> Option<msgs::AnnouncementSignatures>
        where
                }
  
                log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
 -              let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
 +              let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
                        Ok(a) => a,
                        Err(e) => {
                                log_trace!(logger, "{:?}", e);
        /// channel_announcement message which we can broadcast and storing our counterparty's
        /// signatures for later reconstruction/rebroadcast of the channel_announcement.
        pub fn announcement_signatures<NS: Deref>(
 -              &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
 +              &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
                msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
        ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
                let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
        /// Gets a signed channel_announcement for this channel, if we previously received an
        /// announcement_signatures from our counterparty.
        pub fn get_signed_channel_announcement<NS: Deref>(
 -              &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
 +              &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
        ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
                if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
                        return None;
@@@ -5999,7 -5998,7 +5999,7 @@@ impl<SP: Deref> OutboundV1Channel<SP> w
        /// not of our ability to open any channel at all. Thus, on error, we should first call this
        /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
        pub(crate) fn maybe_handle_error_without_close<F: Deref>(
 -              &mut self, chain_hash: BlockHash, fee_estimator: &LowerBoundedFeeEstimator<F>
 +              &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
        ) -> Result<msgs::OpenChannel, ()>
        where
                F::Target: FeeEstimator
                Ok(self.get_open_channel(chain_hash))
        }
  
 -      pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
 +      pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
                if !self.context.is_outbound() {
                        panic!("Tried to open a channel for an inbound channel?");
                }
@@@ -7634,9 -7633,9 +7634,9 @@@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> 
  #[cfg(test)]
  mod tests {
        use std::cmp;
 +      use bitcoin::blockdata::constants::ChainHash;
        use bitcoin::blockdata::script::{Script, Builder};
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
 -      use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::blockdata::opcodes;
        use bitcoin::network::constants::Network;
        use hex;
                // Now change the fee so we can check that the fee in the open_channel message is the
                // same as the old fee.
                fee_est.fee_est = 500;
 -              let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 +              let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
        }
  
  
                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
 -              let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 +              let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
  
                let seed = [42; 32];
                let network = Network::Testnet;
                let best_block = BestBlock::from_network(network);
 -              let chain_hash = best_block.block_hash();
 +              let chain_hash = ChainHash::using_genesis_block(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
  
                // Go through the flow of opening a channel between two nodes.
                let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
                assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
  
 -              let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
 +              let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
  
                // Test that `InboundV1Channel::new` creates a channel with the correct value for
                // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
                let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
                assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
  
 -              let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
 +              let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
                let mut inbound_node_config = UserConfig::default();
                inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
  
                let seed = [42; 32];
                let network = Network::Testnet;
                let best_block = BestBlock::from_network(network);
 -              let chain_hash = genesis_block(network).header.block_hash();
 +              let chain_hash = ChainHash::using_genesis_block(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
  
                // Create Node A's channel pointing to Node B's pubkey
  
                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
 -              let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 +              let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
  
                let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
                channel_type_features.set_zero_conf_required();
  
 -              let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 +              let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                open_channel_msg.channel_type = Some(channel_type_features);
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
                ).unwrap();
  
 -              let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
 +              let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
                let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                        &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
                ).unwrap();
  
                // Set `channel_type` to `None` to force the implicit feature negotiation.
 -              let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
 +              let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
                open_channel_msg.channel_type = None;
  
                // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
                ).unwrap();
  
 -              let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
 +              let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
                open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
  
                let res = InboundV1Channel::<&TestKeysInterface>::new(
                        10000000, 100000, 42, &config, 0, 42
                ).unwrap();
  
 -              let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
 +              let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
  
                let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                let seed = [42; 32];
                let network = Network::Testnet;
                let best_block = BestBlock::from_network(network);
 -              let chain_hash = genesis_block(network).header.block_hash();
 +              let chain_hash = ChainHash::using_genesis_block(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
  
                let mut config = UserConfig::default();
                        42,
                ).unwrap();
  
 -              let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 +              let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
                        &feeest,
index a4079734241e4b80f929a10514f885846ee8b816,203d8f09e7df7e08b5a347cde0730c1b289469c8..d1a1208c8091fed6b5ffad403f41160dcc75ba99
@@@ -19,7 -19,7 +19,7 @@@
  
  use bitcoin::blockdata::block::BlockHeader;
  use bitcoin::blockdata::transaction::Transaction;
 -use bitcoin::blockdata::constants::{genesis_block, ChainHash};
 +use bitcoin::blockdata::constants::ChainHash;
  use bitcoin::network::constants::Network;
  
  use bitcoin::hashes::Hash;
@@@ -447,16 -447,17 +447,17 @@@ impl MsgHandleErrInternal 
        }
        #[inline]
        fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+               let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
+               let action = if let (Some(_), ..) = &shutdown_res {
+                       // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
+                       // should disconnect our peer such that we force them to broadcast their latest
+                       // commitment upon reconnecting.
+                       msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
+               } else {
+                       msgs::ErrorAction::SendErrorMessage { msg: err_msg }
+               };
                Self {
-                       err: LightningError {
-                               err: err.clone(),
-                               action: msgs::ErrorAction::SendErrorMessage {
-                                       msg: msgs::ErrorMessage {
-                                               channel_id,
-                                               data: err
-                                       },
-                               },
-                       },
+                       err: LightningError { err, action },
                        chan_id: Some((channel_id, user_channel_id)),
                        shutdown_finish: Some((shutdown_res, channel_update)),
                        channel_capacity: Some(channel_capacity)
@@@ -1018,7 -1019,7 +1019,7 @@@ wher
        L::Target: Logger,
  {
        default_configuration: UserConfig,
 -      genesis_hash: BlockHash,
 +      chain_hash: ChainHash,
        fee_estimator: LowerBoundedFeeEstimator<F>,
        chain_monitor: M,
        tx_broadcaster: T,
@@@ -2012,7 -2013,7 +2013,7 @@@ macro_rules! emit_channel_ready_event 
  macro_rules! handle_monitor_update_completion {
        ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let mut updates = $chan.monitor_updating_restored(&$self.logger,
 -                      &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
 +                      &$self.node_signer, $self.chain_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
                let counterparty_node_id = $chan.context.get_counterparty_node_id();
                let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
@@@ -2258,7 -2259,7 +2259,7 @@@ wher
                let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
                ChannelManager {
                        default_configuration: config.clone(),
 -                      genesis_hash: genesis_block(params.network).header.block_hash(),
 +                      chain_hash: ChainHash::using_genesis_block(params.network),
                        fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
                        chain_monitor,
                        tx_broadcaster,
                        if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
                                outbound_scid_alias += 1;
                        } else {
 -                              outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 +                              outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        }
                        if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
                                break;
                                },
                        }
                };
 -              let res = channel.get_open_channel(self.genesis_hash.clone());
 +              let res = channel.get_open_channel(self.chain_hash);
  
                let temporary_channel_id = channel.context.channel_id();
                match peer_state.channel_by_id.entry(temporary_channel_id) {
                                        // it does not exist for this peer. Either way, we can attempt to force-close it.
                                        //
                                        // An appropriate error will be returned for non-existence of the channel if that's the case.
 +                                      mem::drop(peer_state_lock);
 +                                      mem::drop(per_peer_state);
                                        return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
                                },
                        }
                                        peer_state.pending_msg_events.push(
                                                events::MessageSendEvent::HandleError {
                                                        node_id: counterparty_node_id,
-                                                       action: msgs::ErrorAction::SendErrorMessage {
-                                                               msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+                                                       action: msgs::ErrorAction::DisconnectPeer {
+                                                               msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
                                                        },
                                                }
                                        );
                // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
                // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
                let current_height: u32 = self.best_block.read().unwrap().height();
 -              if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
 +              if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
                        let mut err_data = Vec::with_capacity(12);
                        err_data.extend_from_slice(&amt_msat.to_be_bytes());
                        err_data.extend_from_slice(&current_height.to_be_bytes());
                                        // Note that this is likely a timing oracle for detecting whether an scid is a
                                        // phantom or an intercept.
                                        if (self.default_configuration.accept_intercept_htlcs &&
 -                                              fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
 -                                              fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
 +                                              fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
 +                                              fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
                                        {
                                                None
                                        } else {
                };
  
                let unsigned = msgs::UnsignedChannelUpdate {
 -                      chain_hash: self.genesis_hash,
 +                      chain_hash: self.chain_hash,
                        short_channel_id,
                        timestamp: chan.context.get_update_time_counter(),
                        flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
                for channel_id in channel_ids {
                        if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
 -                                      err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id),
 +                                      err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
                                });
                        };
                }
                                                next_hop_channel_id, next_node_id)
                                }),
                                None => return Err(APIError::ChannelUnavailable {
 -                                      err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
 +                                      err: format!("Channel with id {} not found for the passed counterparty node_id {}",
                                                next_hop_channel_id, next_node_id)
                                })
                        }
                                                                                }
                                                                                if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
                                                                                        let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
 -                                                                                      if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
 +                                                                                      if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
                                                                                                let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
                                                                                                let next_hop = match onion_utils::decode_next_payment_hop(
                                                                                                        phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
                                                        }
                                                }
                                        }
-                                       let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
-                                               Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
+                                       let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
+                                       let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
+                                               Some((cp_id, chan_id)) => (cp_id, chan_id),
                                                None => {
                                                        forwarding_channel_not_found!();
                                                        continue;
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
                // likely to be lost on restart!
 -              if msg.chain_hash != self.genesis_hash {
 +              if msg.chain_hash != self.chain_hash {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
  
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
 -                                              self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
 +                                              self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
                                        if let Some(announcement_sigs) = announcement_sigs_opt {
                                                log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
 -                                                         fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash)
 +                                                         fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
                                                        {
                                                                let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
                                                                let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
  
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                msg: try_chan_phase_entry!(self, chan.announcement_signatures(
 -                                                      &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
 +                                                      &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
                                                        msg, &self.default_configuration
                                                ), chan_phase_entry),
                                                // Note that announcement_signatures fails if the channel cannot be announced,
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                        debug_assert!(false);
-                                       MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+                                       MsgHandleErrInternal::send_err_msg_no_close(
+                                               format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+                                               msg.channel_id
+                                       )
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                                                // freed HTLCs to fail backwards. If in the future we no longer drop pending
                                                // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                                let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
 -                                                      msg, &self.logger, &self.node_signer, self.genesis_hash,
 +                                                      msg, &self.logger, &self.node_signer, self.chain_hash,
                                                        &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
                                                let mut channel_update = None;
                                                if let Some(msg) = responses.shutdown_msg {
                                                        "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
                                        }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               hash_map::Entry::Vacant(_) => {
+                                       log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
+                                               log_bytes!(msg.channel_id.0));
+                                       // Unfortunately, lnd doesn't force close on errors
+                                       // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
+                                       // One of the few ways to get an lnd counterparty to force close is by
+                                       // replicating what they do when restoring static channel backups (SCBs). They
+                                       // send an invalid `ChannelReestablish` with `0` commitment numbers and an
+                                       // invalid `your_last_per_commitment_secret`.
+                                       //
+                                       // Since we received a `ChannelReestablish` for a channel that doesn't exist, we
+                                       // can assume it's likely the channel closed from our point of view, but it
+                                       // remains open on the counterparty's side. By sending this bogus
+                                       // `ChannelReestablish` message now as a response to theirs, we trigger them to
+                                       // force close broadcasting their latest state. If the closing transaction from
+                                       // our point of view remains unconfirmed, it'll enter a race with the
+                                       // counterparty's to-be-broadcast latest commitment transaction.
+                                       peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
+                                               node_id: *counterparty_node_id,
+                                               msg: msgs::ChannelReestablish {
+                                                       channel_id: msg.channel_id,
+                                                       next_local_commitment_number: 0,
+                                                       next_remote_commitment_number: 0,
+                                                       your_last_per_commitment_secret: [1u8; 32],
+                                                       my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
+                                                       next_funding_txid: None,
+                                               },
+                                       });
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                                               format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
+                                                       counterparty_node_id), msg.channel_id)
+                                       )
+                               }
                        }
                };
  
                                                                                self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
-                                                                                       action: msgs::ErrorAction::SendErrorMessage {
-                                                                                               msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
+                                                                                       action: msgs::ErrorAction::DisconnectPeer {
+                                                                                               msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
                                                                                        },
                                                                                });
                                                                        }
                let best_block_height = self.best_block.read().unwrap().height();
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
 -                      let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 +                      let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        // Ensure the generated scid doesn't conflict with a real channel.
                        match short_to_chan_info.get(&scid_candidate) {
                                Some(_) => continue,
                let best_block_height = self.best_block.read().unwrap().height();
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
 -                      let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
 +                      let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        // Ensure the generated scid doesn't conflict with a real channel.
                        if short_to_chan_info.contains_key(&scid_candidate) { continue }
                        return scid_candidate
@@@ -7472,7 -7507,7 +7509,7 @@@ wher
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
  
 -              self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 +              self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
        }
  }
  
@@@ -7498,13 -7533,13 +7535,13 @@@ wher
                let _persistence_guard =
                        PersistenceNotifierGuard::optionally_notify_skipping_background_events(
                                self, || -> NotifyOption { NotifyOption::DoPersist });
 -              self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
 +              self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
  
                let last_best_block_height = self.best_block.read().unwrap().height();
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
 -                      self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 +                      self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
                }
        }
  
                                self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
  
 -              self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
 +              self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
  
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@@ -7641,7 -7676,7 +7678,7 @@@ wher
                                                                                msg: announcement_sigs,
                                                                        });
                                                                        if let Some(height) = height_opt {
 -                                                                              if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
 +                                                                              if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.chain_hash, height, &self.default_configuration) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                                                                msg: announcement,
                                                                                                // Note that announcement_signatures fails if the channel cannot be announced,
                                                                self.issue_channel_close_events(&channel.context, reason);
                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                        node_id: channel.context.get_counterparty_node_id(),
-                                                                       action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-                                                                               channel_id: channel.context.channel_id(),
-                                                                               data: reason_message,
-                                                                       } },
+                                                                       action: msgs::ErrorAction::DisconnectPeer {
+                                                                               msg: Some(msgs::ErrorMessage {
+                                                                                       channel_id: channel.context.channel_id(),
+                                                                                       data: reason_message,
+                                                                               })
+                                                                       },
                                                                });
                                                                return false;
                                                        }
@@@ -8251,7 -8288,7 +8290,7 @@@ wher
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
 -                                      if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
 +                                      if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                                                        node_id: *counterparty_node_id,
                                                        msg,
                provided_init_features(&self.default_configuration)
        }
  
 -      fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
 -              Some(vec![ChainHash::from(&self.genesis_hash[..])])
 +      fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
 +              Some(vec![self.chain_hash])
        }
  
        fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
@@@ -8820,7 -8857,7 +8859,7 @@@ wher
  
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
  
 -              self.genesis_hash.write(writer)?;
 +              self.chain_hash.write(writer)?;
                {
                        let best_block = self.best_block.read().unwrap();
                        best_block.height().write(writer)?;
@@@ -9231,7 -9268,7 +9270,7 @@@ wher
        fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
  
 -              let genesis_hash: BlockHash = Readable::read(reader)?;
 +              let chain_hash: ChainHash = Readable::read(reader)?;
                let best_block_height: u32 = Readable::read(reader)?;
                let best_block_hash: BlockHash = Readable::read(reader)?;
  
                                                let mut outbound_scid_alias;
                                                loop {
                                                        outbound_scid_alias = fake_scid::Namespace::OutboundAlias
 -                                                              .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
 +                                                              .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
                                                        if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
                                                }
                                                chan.context.set_outbound_scid_alias(outbound_scid_alias);
                }
  
                let channel_manager = ChannelManager {
 -                      genesis_hash,
 +                      chain_hash,
                        fee_estimator: bounded_fee_estimator,
                        chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
@@@ -10752,16 -10789,6 +10791,16 @@@ mod tests 
                check_api_error_message(expected_message, res_err)
        }
  
 +      fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
 +              let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
 +              check_api_error_message(expected_message, res_err)
 +      }
 +
 +      fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
 +              let expected_message = "No such channel awaiting to be accepted.".to_string();
 +              check_api_error_message(expected_message, res_err)
 +      }
 +
        fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
                match res_err {
                        Err(APIError::APIMisuseError { err }) => {
                check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
        }
  
 +      #[test]
 +      fn test_api_calls_with_unavailable_channel() {
 +              // Tests that our API functions that expect a `counterparty_node_id` and a `channel_id`
 +              // as input behave as expected if the `counterparty_node_id` is a known peer in the
 +              // `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with
 +              // the given `channel_id`.
 +              let chanmon_cfg = create_chanmon_cfgs(2);
 +              let node_cfg = create_node_cfgs(2, &chanmon_cfg);
 +              let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
 +              let nodes = create_network(2, &node_cfg, &node_chanmgr);
 +
 +              let counterparty_node_id = nodes[1].node.get_our_node_id();
 +
 +              // Dummy values
 +              let channel_id = ChannelId::from_bytes([4; 32]);
 +
 +              // Test the API functions.
 +              check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
 +
 +              check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
 +
 +              check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
 +
 +              check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
 +
 +              check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
 +
 +              check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
 +      }
 +
        #[test]
        fn test_connection_limiting() {
                // Test that we limit un-channel'd peers and un-funded channels properly.
                        sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok());
        }
  
 +      #[test]
 +      fn test_final_incorrect_cltv() {
 +              let chanmon_cfg = create_chanmon_cfgs(1);
 +              let node_cfg = create_node_cfgs(1, &chanmon_cfg);
 +              let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
 +              let node = create_network(1, &node_cfg, &node_chanmgr);
 +
 +              let result = node[0].node.construct_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
 +                      amt_msat: 100,
 +                      outgoing_cltv_value: 22,
 +                      payment_metadata: None,
 +                      keysend_preimage: None,
 +                      payment_data: Some(msgs::FinalOnionHopData {
 +                              payment_secret: PaymentSecret([0; 32]), total_msat: 100,
 +                      }),
 +                      custom_tlvs: Vec::new(),
 +              }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None);
 +
 +              // Should not return an error as this condition:
 +              // https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
 +              // is not satisfied.
 +              assert!(result.is_ok());
 +      }
 +
        #[test]
        fn test_inbound_anchors_manual_acceptance() {
                // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
                let payment_preimage = PaymentPreimage([42; 32]);
                assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
        }
+       #[test]
+       fn test_trigger_lnd_force_close() {
+               let chanmon_cfg = create_chanmon_cfgs(2);
+               let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+               let user_config = test_default_channel_config();
+               let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+               let nodes = create_network(2, &node_cfg, &node_chanmgr);
+               // Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
+               let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+               check_closed_broadcast(&nodes[0], 1, true);
+               check_added_monitors(&nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+               {
+                       let txn = nodes[0].tx_broadcaster.txn_broadcast();
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], funding_tx);
+               }
+               // Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
+               // such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
+               // their side.
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+               let channel_reestablish = get_event_msg!(
+                       nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
+               );
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &channel_reestablish);
+               // Alice should respond with an error since the channel isn't known, but a bogus
+               // `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
+               // close even if it was an lnd node.
+               let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 2);
+               if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
+                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                       assert_eq!(msg.next_local_commitment_number, 0);
+                       assert_eq!(msg.next_remote_commitment_number, 0);
+                       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &msg);
+               } else { panic!() };
+               check_closed_broadcast(&nodes[1], 1, true);
+               check_added_monitors(&nodes[1], 1);
+               let expected_close_reason = ClosureReason::ProcessingError {
+                       err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
+               };
+               check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
+               {
+                       let txn = nodes[1].tx_broadcaster.txn_broadcast();
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], funding_tx);
+               }
+       }
  }
  
  #[cfg(ldk_bench)]
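
The test added above exercises the core of this change: when a peer asks to reestablish a channel we no longer know about, we first reply with a channel_reestablish whose commitment numbers are both zero, which is invalid once any commitment has been exchanged, and only then send the usual error. Peers that do not force-close on error messages alone (notably lnd) treat the bogus reestablish as proof the channel is unrecoverable and broadcast their latest commitment. Below is a minimal sketch of how such a message might be assembled; the helper name and the dummy secret/point values are illustrative rather than the exact ones used in this PR, and the field set assumes this LDK version's ChannelReestablish layout:

use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use lightning::ln::msgs;
use lightning::ln::ChannelId;

// Illustrative only: the zero commitment numbers are what the receiver reacts to; the
// per-commitment secret/point are placeholder values, since the receiving node is
// expected to reject the message based on the commitment numbers alone.
fn bogus_reestablish(channel_id: ChannelId) -> msgs::ChannelReestablish {
    let dummy_key = SecretKey::from_slice(&[42u8; 32]).expect("constant is a valid secret key");
    msgs::ChannelReestablish {
        channel_id,
        next_local_commitment_number: 0,
        next_remote_commitment_number: 0,
        your_last_per_commitment_secret: [0u8; 32],
        my_current_per_commitment_point: PublicKey::from_secret_key(&Secp256k1::new(), &dummy_key),
        next_funding_txid: None,
    }
}
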
index c75ee379d8dae1e49b51d4ddf74b183e94172886,cabbba8f3ae06914ffc209f4f1b56c02d5d09ed3..714fe2f248a828a273f373eb7607285a2bac3953
@@@ -40,7 -40,7 +40,7 @@@ use crate::util::config::{UserConfig, M
  use bitcoin::hash_types::BlockHash;
  use bitcoin::blockdata::script::{Builder, Script};
  use bitcoin::blockdata::opcodes;
 -use bitcoin::blockdata::constants::genesis_block;
 +use bitcoin::blockdata::constants::ChainHash;
  use bitcoin::network::constants::Network;
  use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxOut, Witness};
  use bitcoin::OutPoint as BitcoinOutPoint;
@@@ -1338,9 -1338,9 +1338,9 @@@ fn test_duplicate_htlc_different_direct
        for e in events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-                       MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
+                       MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
                                assert_eq!(node_id, nodes[1].node.get_our_node_id());
-                               assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
+                               assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
                        },
                        MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
                                assert!(update_add_htlcs.is_empty());
@@@ -2369,7 -2369,7 +2369,7 @@@ fn channel_monitor_network_test() 
                        _ => panic!("Unexpected event"),
                };
                match events[1] {
-                       MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
+                       MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[4].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
                        _ => panic!("Unexpected event"),
                };
                match events[1] {
-                       MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
+                       MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[3].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
@@@ -2913,7 -2913,7 +2913,7 @@@ fn test_htlc_on_chain_success() 
        let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
  
        match nodes_2_event {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
                _ => panic!("Unexpected event"),
        }
  
@@@ -3358,7 -3358,7 +3358,7 @@@ fn do_test_commitment_revoked_fail_back
  
        let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
        match nodes_2_event {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
                        assert_eq!(channel_id, chan_2.2);
                        assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
                },
@@@ -4920,7 -4920,7 +4920,7 @@@ fn test_onchain_to_onchain_claim() 
        let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
  
        match nodes_2_event {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
                _ => panic!("Unexpected event"),
        }
  
@@@ -5830,8 -5830,8 +5830,8 @@@ fn bolt2_open_channel_sending_node_chec
        assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
  
        // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
 -      let chain_hash=genesis_block(Network::Testnet).header.block_hash();
 -      assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
 +      let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
 +      assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash);
  
        // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
        assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
@@@ -7860,9 -7860,9 +7860,9 @@@ fn test_channel_conf_timeout() 
        let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_ev.len(), 1);
        match close_ev[0] {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id } => {
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-                       assert_eq!(msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
+                       assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
                },
                _ => panic!("Unexpected event"),
        }
@@@ -9212,8 -9212,8 +9212,8 @@@ fn test_invalid_funding_tx() 
        assert_eq!(events_2.len(), 1);
        if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-               if let msgs::ErrorAction::SendErrorMessage { msg } = action {
-                       assert_eq!(msg.data, "Channel closed because of an exception: ".to_owned() + expected_err);
+               if let msgs::ErrorAction::DisconnectPeer { msg } = action {
+                       assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
                } else { panic!(); }
        } else { panic!(); }
        assert_eq!(nodes[1].node.list_channels().len(), 0);
@@@ -10652,7 -10652,7 +10652,7 @@@ fn do_test_funding_and_commitment_tx_co
        let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 1);
        match msg_events.pop().unwrap() {
-               MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { .. }, .. } => {},
+               MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
                _ => panic!("Unexpected event"),
        }
        check_added_monitors(closing_node, 1);
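
The assertions updated above track a behaviour change in how on-chain closes surface to the peer: rather than a bare SendErrorMessage, the node now emits ErrorAction::DisconnectPeer, which carries the error message as an Option, so the tests unwrap it before inspecting its data. A small, purely illustrative consumer of the two shapes (the function is hypothetical; the variant fields are as matched in the tests above):

use lightning::ln::msgs::ErrorAction;

// Hypothetical event consumer: DisconnectPeer may or may not carry a final error
// message, while SendErrorMessage always does.
fn describe(action: &ErrorAction) -> String {
    match action {
        ErrorAction::DisconnectPeer { msg: Some(msg) } => format!("disconnect, error: {}", msg.data),
        ErrorAction::DisconnectPeer { msg: None } => "disconnect without an error message".to_owned(),
        ErrorAction::SendErrorMessage { msg } => format!("send error: {}", msg.data),
        _ => "other action".to_owned(),
    }
}
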
index 9f6f233861b57a01f5bb6b6fc3a8b2ff843a9b5a,f35b67e98f1c703813095c991deb3424ae775960..9f98f84fa425310d97a36d9fec9793739ba5bbf3
@@@ -147,7 -147,6 +147,7 @@@ fn mpp_retry() 
        // Check the remaining max total routing fee for the second attempt is 50_000 - 1_000 msat fee
        // used by the first path
        route_params.max_total_routing_fee_msat = Some(max_total_routing_fee_msat - 1_000);
 +      route.route_params = Some(route_params.clone());
        nodes[0].router.expect_find_route(route_params, Ok(route));
        nodes[0].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[0], 1);
@@@ -254,12 -253,12 +254,12 @@@ fn mpp_retry_overpay() 
  
        route.paths.remove(0);
        route_params.final_value_msat -= first_path_value;
 -      route.route_params.as_mut().map(|p| p.final_value_msat -= first_path_value);
        route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
 -
        // Check the remaining max total routing fee for the second attempt accounts only for 1_000 msat
        // base fee, but not for overpaid value of the first try.
        route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 1000);
 +
 +      route.route_params = Some(route_params.clone());
        nodes[0].router.expect_find_route(route_params, Ok(route));
        nodes[0].node.process_pending_htlc_forwards();
  
@@@ -707,8 -706,8 +707,8 @@@ fn do_retry_with_no_persist(confirm_bef
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(as_err.len(), 1);
-       match as_err[0] {
+       assert_eq!(as_err.len(), 2);
+       match as_err[1] {
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
@@@ -882,9 -881,9 +882,9 @@@ fn do_test_completed_payment_not_retrya
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(as_err.len(), 1);
+       assert_eq!(as_err.len(), 2);
        let bs_commitment_tx;
-       match as_err[0] {
+       match as_err[1] {
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
@@@ -1907,7 -1906,7 +1907,7 @@@ fn do_test_intercepted_payment(test: In
        // Check for unknown channel id error.
        let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
        assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable  {
 -              err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
 +              err: format!("Channel with id {} not found for the passed counterparty node_id {}",
                        log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
  
        if test == InterceptTest::Fail {
@@@ -2739,7 -2738,7 +2739,7 @@@ fn retry_multi_path_single_failed_payme
  
        let mut retry_params = RouteParameters::from_payment_params_and_value(pay_params, 100_000_000);
        retry_params.max_total_routing_fee_msat = None;
 -      route.route_params.as_mut().unwrap().final_value_msat = 100_000_000;
 +      route.route_params = Some(retry_params.clone());
        nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));
  
        {
@@@ -2810,7 -2809,9 +2810,7 @@@ fn immediate_retry_on_failure() 
                                maybe_announced_channel: true,
                        }], blinded_tail: None },
                ],
 -              route_params: Some(RouteParameters::from_payment_params_and_value(
 -                      PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV),
 -                      100_000_001)),
 +              route_params: Some(route_params.clone()),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        // On retry, split the payment across both channels.
        route.paths[1].hops[0].fee_msat = 50_000_001;
        let mut pay_params = route_params.payment_params.clone();
        pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
 -      nodes[0].router.expect_find_route(
 -              RouteParameters::from_payment_params_and_value(pay_params, amt_msat),
 -              Ok(route.clone()));
 +      let retry_params = RouteParameters::from_payment_params_and_value(pay_params, amt_msat);
 +      route.route_params = Some(retry_params.clone());
 +      nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));
  
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@@ -2932,7 -2933,6 +2932,7 @@@ fn no_extra_retries_on_back_to_back_fai
        route.paths[0].hops[1].fee_msat = amt_msat;
        let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat);
        retry_params.max_total_routing_fee_msat = None;
 +      route.route_params = Some(retry_params.clone());
        nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));
  
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@@ -3137,7 -3137,7 +3137,7 @@@ fn test_simple_partial_retry() 
        route.paths.remove(0);
        let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2);
        retry_params.max_total_routing_fee_msat = None;
 -      route.route_params.as_mut().unwrap().final_value_msat = amt_msat / 2;
 +      route.route_params = Some(retry_params.clone());
        nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));
  
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@@ -3316,7 -3316,7 +3316,7 @@@ fn test_threaded_payment_retries() 
  
        // from here on out, the retry `RouteParameters` amount will be amt/1000
        route_params.final_value_msat /= 1000;
 -      route.route_params.as_mut().unwrap().final_value_msat /= 1000;
 +      route.route_params = Some(route_params.clone());
        route.paths.pop();
  
        let end_time = Instant::now() + Duration::from_secs(1);
                new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone();
                new_route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 100_000);
                route.paths[0].hops[1].short_channel_id += 1;
 +              route.route_params = Some(new_route_params.clone());
                nodes[0].router.expect_find_route(new_route_params, Ok(route.clone()));
  
                let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@@ -3721,7 -3720,7 +3721,7 @@@ fn test_retry_custom_tlvs() 
        send_payment(&nodes[2], &vec!(&nodes[1])[..], 1_500_000);
  
        let amt_msat = 1_000_000;
 -      let (route, payment_hash, payment_preimage, payment_secret) =
 +      let (mut route, payment_hash, payment_preimage, payment_secret) =
                get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
  
        // Initiate the payment
  
        // Retry the payment and make sure it succeeds
        route_params.payment_params.previously_failed_channels.push(chan_2_update.contents.short_channel_id);
 +      route.route_params = Some(route_params.clone());
        nodes[0].router.expect_find_route(route_params, Ok(route));
        nodes[0].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[0], 1);
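
A recurring edit in these payment tests is keeping the mocked router's expected query in step with the canned route's embedded parameters: wherever expect_find_route is primed with a RouteParameters value, the Route handed back now carries a clone of the same value in route.route_params, presumably because the retry path reads the parameters back out of the returned route. A minimal sketch of the pattern, with a hypothetical helper name and the expect_find_route signature assumed from its use above:

use lightning::routing::router::{Route, RouteParameters};
use lightning::util::test_utils::TestRouter;

// Hypothetical helper mirroring the pattern used throughout the tests above: keep the
// canned Route's embedded parameters identical to the query the mock router expects.
fn prime_retry(router: &TestRouter<'_>, mut route: Route, route_params: RouteParameters) {
    route.route_params = Some(route_params.clone());
    router.expect_find_route(route_params, Ok(route));
}
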