Construct ShutdownResult as a struct in Channel
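Rather than returning an anonymous tuple from the `Channel` shutdown paths, construct a named `ShutdownResult` struct so call sites can access fields by name. The struct itself is defined in `lightning/src/ln/channel.rs` and is not part of this file's diff; the sketch below is inferred from the field accesses visible here (`monitor_update`, `dropped_outbound_htlcs`, `unbroadcasted_batch_funding_txid`) and uses types from the surrounding crate:

```rust
// Inferred from usage in this diff; the authoritative definition lives in
// lightning/src/ln/channel.rs.
pub(crate) struct ShutdownResult {
	/// A closing `ChannelMonitorUpdate` (present if the channel was funded),
	/// keyed by the counterparty node id and funding outpoint.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// Outbound HTLCs to fail backwards, as
	/// (source, payment hash, counterparty node id, channel id).
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// Set when the channel belonged to a batch funding transaction that was
	/// never broadcast.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}
```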
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 755cb2c76845678dc97e75c804884e0c3dcdec3c..9620e793ec4a549f8aaebe14479330c7f14dce35 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -19,7 +19,7 @@
 
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::{genesis_block, ChainHash};
+use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::network::constants::Network;
 
 use bitcoin::hashes::Hash;
@@ -30,6 +30,7 @@ use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::{LockTime, secp256k1, Sequence};
 
+use crate::blinded_path::BlindedPath;
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@ -55,6 +56,9 @@ use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
 use crate::ln::wire::Encode;
+use crate::offers::offer::{DerivedMetadata, OfferBuilder};
+use crate::offers::parse::Bolt12SemanticError;
+use crate::offers::refund::RefundBuilder;
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
@@ -447,16 +451,17 @@ impl MsgHandleErrInternal {
        }
        #[inline]
        fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+               let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
+               let action = if shutdown_res.monitor_update.is_some() {
+                       // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
+                       // should disconnect our peer such that we force them to broadcast their latest
+                       // commitment upon reconnecting.
+                       msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
+               } else {
+                       msgs::ErrorAction::SendErrorMessage { msg: err_msg }
+               };
                Self {
-                       err: LightningError {
-                               err: err.clone(),
-                               action: msgs::ErrorAction::SendErrorMessage {
-                                       msg: msgs::ErrorMessage {
-                                               channel_id,
-                                               data: err
-                                       },
-                               },
-                       },
+                       err: LightningError { err, action },
                        chan_id: Some((channel_id, user_channel_id)),
                        shutdown_finish: Some((shutdown_res, channel_update)),
                        channel_capacity: Some(channel_capacity)
@@ -1018,7 +1023,7 @@ where
        L::Target: Logger,
 {
        default_configuration: UserConfig,
-       genesis_hash: BlockHash,
+       chain_hash: ChainHash,
        fee_estimator: LowerBoundedFeeEstimator<F>,
        chain_monitor: M,
        tx_broadcaster: T,
@@ -2012,7 +2017,7 @@ macro_rules! emit_channel_ready_event {
 macro_rules! handle_monitor_update_completion {
        ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let mut updates = $chan.monitor_updating_restored(&$self.logger,
-                       &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+                       &$self.node_signer, $self.chain_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
                let counterparty_node_id = $chan.context.get_counterparty_node_id();
                let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
@@ -2258,7 +2263,7 @@ where
                let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
                ChannelManager {
                        default_configuration: config.clone(),
-                       genesis_hash: genesis_block(params.network).header.block_hash(),
+                       chain_hash: ChainHash::using_genesis_block(params.network),
                        fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
                        chain_monitor,
                        tx_broadcaster,
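For reference, the `genesis_hash: BlockHash` field becomes `chain_hash: ChainHash` throughout this file. The two carry the same 32 bytes for a given network, so the rename is behavior-preserving; a standalone sketch of that equivalence (not part of the diff, assuming the rust-bitcoin version this crate pins, which exposes `as_bytes` and `into_inner`):

```rust
use bitcoin::blockdata::constants::{genesis_block, ChainHash};
use bitcoin::network::constants::Network;

fn main() {
	let network = Network::Testnet;
	// Old approach: build the genesis block and hash its header.
	let genesis_hash = genesis_block(network).header.block_hash();
	// New approach: ChainHash yields the same bytes directly, without
	// constructing the full genesis block.
	let chain_hash = ChainHash::using_genesis_block(network);
	assert_eq!(chain_hash.as_bytes(), &genesis_hash.into_inner());
}
```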
@@ -2317,7 +2322,7 @@ where
                        if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
                                outbound_scid_alias += 1;
                        } else {
-                               outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+                               outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        }
                        if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
                                break;
@@ -2387,7 +2392,7 @@ where
                                },
                        }
                };
-               let res = channel.get_open_channel(self.genesis_hash.clone());
+               let res = channel.get_open_channel(self.chain_hash);
 
                let temporary_channel_id = channel.context.channel_id();
                match peer_state.channel_by_id.entry(temporary_channel_id) {
@@ -2559,7 +2564,7 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-               let mut shutdown_result = None;
+               let shutdown_result;
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2574,10 +2579,11 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let their_features = &peer_state.latest_features;
-                                               let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-                                               let (shutdown_msg, mut monitor_update_opt, htlcs) =
+                                               let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
                                                        chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
                                                failed_htlcs = htlcs;
+                                               shutdown_result = local_shutdown_result;
+                                               debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 
                                                // We can send the `shutdown` message before updating the `ChannelMonitor`
                                                // here as we don't need the monitor update to complete until we send a
@@ -2605,7 +2611,6 @@ where
                                                                        });
                                                                }
                                                                self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-                                                               shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
                                                        }
                                                }
                                                break;
@@ -2697,22 +2702,21 @@ where
                self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
        }
 
-       fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+       fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
                debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
                #[cfg(debug_assertions)]
                for (_, peer) in self.per_peer_state.read().unwrap().iter() {
                        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
                }
 
-               let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
-               log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
-               for htlc_source in failed_htlcs.drain(..) {
+               log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+               for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
                        self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
-               if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+               if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
                        // There isn't anything we can do if we get an update failure - we're already
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -2720,7 +2724,7 @@ where
                        let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
                }
                let mut shutdown_results = Vec::new();
-               if let Some(txid) = unbroadcasted_batch_funding_txid {
+               if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
                        let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
                        let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -2814,8 +2818,8 @@ where
                                        peer_state.pending_msg_events.push(
                                                events::MessageSendEvent::HandleError {
                                                        node_id: counterparty_node_id,
-                                                       action: msgs::ErrorAction::SendErrorMessage {
-                                                               msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+                                                       action: msgs::ErrorAction::DisconnectPeer {
+                                                               msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
                                                        },
                                                }
                                        );
@@ -3103,8 +3107,8 @@ where
                                        // Note that this is likely a timing oracle for detecting whether an scid is a
                                        // phantom or an intercept.
                                        if (self.default_configuration.accept_intercept_htlcs &&
-                                               fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
-                                               fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+                                               fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
+                                               fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
                                        {
                                                None
                                        } else {
@@ -3332,7 +3336,7 @@ where
                };
 
                let unsigned = msgs::UnsignedChannelUpdate {
-                       chain_hash: self.genesis_hash,
+                       chain_hash: self.chain_hash,
                        short_channel_id,
                        timestamp: chan.context.get_update_time_counter(),
                        flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
@@ -3844,7 +3848,7 @@ where
        /// Return values are identical to [`Self::funding_transaction_generated`], respective to
        /// each individual channel and transaction output.
        ///
-       /// Do NOT broadcast the funding transaction yourself. This batch funding transcaction
+       /// Do NOT broadcast the funding transaction yourself. This batch funding transaction
        /// will only be broadcast when we have safely received and persisted the counterparty's
        /// signature for each channel.
        ///
@@ -3898,7 +3902,7 @@ where
                                btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
                        }
                });
-               for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() {
+               for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
                        result = result.and_then(|_| self.funding_transaction_generated_intern(
                                temporary_channel_id,
                                counterparty_node_id,
@@ -4250,7 +4254,7 @@ where
                                                                                }
                                                                                if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
                                                                                        let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
-                                                                                       if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
+                                                                                       if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
                                                                                                let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
                                                                                                let next_hop = match onion_utils::decode_next_payment_hop(
                                                                                                        phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
@@ -4298,8 +4302,9 @@ where
                                                        }
                                                }
                                        }
-                                       let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
-                                               Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
+                                       let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
+                                       let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
+                                               Some((cp_id, chan_id)) => (cp_id, chan_id),
                                                None => {
                                                        forwarding_channel_not_found!();
                                                        continue;
@@ -4789,6 +4794,10 @@ where
        ///    with the current [`ChannelConfig`].
 ///  * Removing peers which have disconnected and no longer have any channels.
        ///  * Force-closing and removing channels which have not completed establishment in a timely manner.
+       ///  * Forgetting about stale outbound payments, either those that have already been fulfilled
+       ///    or those awaiting an invoice that hasn't been delivered in the necessary amount of time.
+       ///    The latter is determined using the system clock in `std` and the block time minus two
+       ///    hours in `no-std`.
        ///
        /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
        /// estimate fetches.
@@ -5017,7 +5026,18 @@ where
                                self.finish_close_channel(shutdown_res);
                        }
 
-                       self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
+                       #[cfg(feature = "std")]
+                       let duration_since_epoch = std::time::SystemTime::now()
+                               .duration_since(std::time::SystemTime::UNIX_EPOCH)
+                               .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+                       #[cfg(not(feature = "std"))]
+                       let duration_since_epoch = Duration::from_secs(
+                               self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
+                       );
+
+                       self.pending_outbound_payments.remove_stale_payments(
+                               duration_since_epoch, &self.pending_events
+                       );
 
                        // Technically we don't need to do this here, but if we have holding cell entries in a
                        // channel that need freeing, it's better to do that here and block a background task
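The `no-std` branch above leans on the consensus rule that a block timestamp may be at most two hours ahead of real time, so the highest seen block time minus 7200 seconds is a lower bound on the actual current time. A minimal illustration (the function name here is ours, not from the diff):

```rust
use core::time::Duration;

/// Illustrative only: since consensus rules cap block timestamps at two hours
/// past real time, subtracting 7200 seconds from the highest block timestamp
/// we have seen never overshoots the true current time.
fn conservative_now(highest_seen_block_time: u32) -> Duration {
	Duration::from_secs(highest_seen_block_time.saturating_sub(7200) as u64)
}
```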
@@ -5876,7 +5896,7 @@ where
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
                // likely to be lost on restart!
-               if msg.chain_hash != self.genesis_hash {
+               if msg.chain_hash != self.chain_hash {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
 
@@ -6141,7 +6161,7 @@ where
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
-                                               self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+                                               self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
                                        if let Some(announcement_sigs) = announcement_sigs_opt {
                                                log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@ -6247,22 +6267,20 @@ where
        }
 
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
-               let mut shutdown_result = None;
-               let unbroadcasted_batch_funding_txid;
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
                                debug_assert!(false);
                                MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
                        })?;
-               let (tx, chan_option) = {
+               let (tx, chan_option, shutdown_result) = {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                               unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-                                               let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+                                               let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+                                               debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
                                                if let Some(msg) = closing_signed {
                                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
                                                                node_id: counterparty_node_id.clone(),
@@ -6275,8 +6293,8 @@ where
                                                        // also implies there are no pending HTLCs left on the channel, so we can
                                                        // fully delete it from tracking (the channel monitor is still around to
                                                        // watch for old state broadcasts)!
-                                                       (tx, Some(remove_channel_phase!(self, chan_phase_entry)))
-                                               } else { (tx, None) }
+                                                       (tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+                                               } else { (tx, None, shutdown_result) }
                                        } else {
                                                return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                        "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6298,7 +6316,6 @@ where
                                });
                        }
                        self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-                       shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
                }
                mem::drop(per_peer_state);
                if let Some(shutdown_result) = shutdown_result {
@@ -6518,7 +6535,7 @@ where
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
-                                                          fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash)
+                                                          fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
                                                        {
                                                                let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
                                                                let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
@@ -6711,7 +6728,7 @@ where
 
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                msg: try_chan_phase_entry!(self, chan.announcement_signatures(
-                                                       &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+                                                       &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
                                                        msg, &self.default_configuration
                                                ), chan_phase_entry),
                                                // Note that announcement_signatures fails if the channel cannot be announced,
@@ -6787,7 +6804,10 @@ where
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                        debug_assert!(false);
-                                       MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+                                       MsgHandleErrInternal::send_err_msg_no_close(
+                                               format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+                                               msg.channel_id
+                                       )
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
@@ -6799,7 +6819,7 @@ where
                                                // freed HTLCs to fail backwards. If in the future we no longer drop pending
                                                // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                                let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
-                                                       msg, &self.logger, &self.node_signer, self.genesis_hash,
+                                                       msg, &self.logger, &self.node_signer, self.chain_hash,
                                                        &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
                                                let mut channel_update = None;
                                                if let Some(msg) = responses.shutdown_msg {
@@ -6831,7 +6851,39 @@ where
                                                        "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
                                        }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               hash_map::Entry::Vacant(_) => {
+                                       log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
+                                               log_bytes!(msg.channel_id.0));
+                                       // Unfortunately, lnd doesn't force close on errors
+                                       // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
+                                       // One of the few ways to get an lnd counterparty to force close is by
+                                       // replicating what they do when restoring static channel backups (SCBs). They
+                                       // send an invalid `ChannelReestablish` with `0` commitment numbers and an
+                                       // invalid `your_last_per_commitment_secret`.
+                                       //
+                                       // Since we received a `ChannelReestablish` for a channel that doesn't exist, we
+                                       // can assume it's likely the channel closed from our point of view, but it
+                                       // remains open on the counterparty's side. By sending this bogus
+                                       // `ChannelReestablish` message now as a response to theirs, we trigger them to
+                                       // force close broadcasting their latest state. If the closing transaction from
+                                       // our point of view remains unconfirmed, it'll enter a race with the
+                                       // counterparty's to-be-broadcast latest commitment transaction.
+                                       peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
+                                               node_id: *counterparty_node_id,
+                                               msg: msgs::ChannelReestablish {
+                                                       channel_id: msg.channel_id,
+                                                       next_local_commitment_number: 0,
+                                                       next_remote_commitment_number: 0,
+                                                       your_last_per_commitment_secret: [1u8; 32],
+                                                       my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
+                                                       next_funding_txid: None,
+                                               },
+                                       });
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                                               format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
+                                                       counterparty_node_id), msg.channel_id)
+                                       )
+                               }
                        }
                };
 
@@ -6895,8 +6947,8 @@ where
                                                                                self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
-                                                                                       action: msgs::ErrorAction::SendErrorMessage {
-                                                                                               msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
+                                                                                       action: msgs::ErrorAction::DisconnectPeer {
+                                                                                               msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
                                                                                        },
                                                                                });
                                                                        }
@@ -6993,15 +7045,18 @@ where
                                peer_state.channel_by_id.retain(|channel_id, phase| {
                                        match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
                                                        match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-                                                               Ok((msg_opt, tx_opt)) => {
+                                                               Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
                                                                        if let Some(msg) = msg_opt {
                                                                                has_update = true;
                                                                                pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
                                                                                        node_id: chan.context.get_counterparty_node_id(), msg,
                                                                                });
                                                                        }
+                                                                       debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+                                                                       if let Some(shutdown_result) = shutdown_result_opt {
+                                                                               shutdown_results.push(shutdown_result);
+                                                                       }
                                                                        if let Some(tx) = tx_opt {
                                                                                // We're done with this channel. We got a closing_signed and sent back
                                                                                // a closing_signed with a closing transaction to broadcast.
@@ -7016,7 +7071,6 @@ where
                                                                                log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
-                                                                               shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
                                                                                false
                                                                        } else { true }
                                                                },
@@ -7057,7 +7111,7 @@ where
                        // Channel::force_shutdown tries to make us do) as we may still be in initialization,
                        // so we track the update internally and handle it when the user next calls
                        // timer_tick_occurred, guaranteeing we're running normally.
-                       if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+                       if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
                                assert_eq!(update.updates.len(), 1);
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
@@ -7071,6 +7125,71 @@ where
                }
        }
 
+       /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
+       /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
+       /// not have an expiration unless otherwise set on the builder.
+       ///
+       /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
+       /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
+       /// the node must be announced. Otherwise, there is no way to find a path to the introduction
+       /// node in order to send the [`InvoiceRequest`].
+       ///
+       /// [`Offer`]: crate::offers::offer::Offer
+       /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+       pub fn create_offer_builder(
+               &self, description: String
+       ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
+               let node_id = self.get_our_node_id();
+               let expanded_key = &self.inbound_payment_key;
+               let entropy = &*self.entropy_source;
+               let secp_ctx = &self.secp_ctx;
+               let path = self.create_one_hop_blinded_path();
+
+               OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
+                       .chain_hash(self.chain_hash)
+                       .path(path)
+       }
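A hypothetical caller (not part of this diff) might use the new builder as below, assuming a `channel_manager` in scope; `amount_msats` and `build` are `OfferBuilder` methods layered on top of `create_offer_builder`:

```rust
// Hypothetical usage sketch, assuming `channel_manager: &ChannelManager<...>`.
let offer = channel_manager
	.create_offer_builder("coffee".to_string())
	.amount_msats(10_000_000) // optional: offers may also be amount-less
	.build()
	.expect("semantically valid offer");
// Offers are bech32-encoded for display/transport ("lno1...").
println!("{}", offer);
```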
+
+       /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
+       /// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund. The builder will
+       /// have the provided expiration set. Any changes to the expiration on the returned builder will
+       /// not be honored by [`ChannelManager`].
+       ///
+       /// The provided `payment_id` is used to ensure that only one invoice is paid for the refund.
+       ///
+       /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
+       /// the introduction node and a derived payer id for sender privacy. As such, currently, the
+       /// node must be announced. Otherwise, there is no way to find a path to the introduction node
+       /// in order to send the [`Bolt12Invoice`].
+       ///
+       /// [`Refund`]: crate::offers::refund::Refund
+       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+       pub fn create_refund_builder(
+               &self, description: String, amount_msats: u64, absolute_expiry: Duration,
+               payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
+       ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
+               let node_id = self.get_our_node_id();
+               let expanded_key = &self.inbound_payment_key;
+               let entropy = &*self.entropy_source;
+               let secp_ctx = &self.secp_ctx;
+               let path = self.create_one_hop_blinded_path();
+
+               let builder = RefundBuilder::deriving_payer_id(
+                       description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
+               )?
+                       .chain_hash(self.chain_hash)
+                       .absolute_expiry(absolute_expiry)
+                       .path(path);
+
+               self.pending_outbound_payments
+                       .add_new_awaiting_invoice(
+                               payment_id, absolute_expiry, retry_strategy, max_total_routing_fee_msat,
+                       )
+                       .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
+
+               Ok(builder)
+       }
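Similarly, a hypothetical caller sketch for the refund path (the `payment_id` and `absolute_expiry` bindings are assumed to be in scope; the first `expect` covers the `Bolt12SemanticError::DuplicatePaymentId` case returned above):

```rust
// Hypothetical usage sketch, assuming `channel_manager`, a fresh `payment_id`,
// and an `absolute_expiry` Duration since the UNIX epoch are in scope.
let refund = channel_manager
	.create_refund_builder(
		"refund for order 42".to_string(),
		10_000_000,          // amount_msats
		absolute_expiry,
		payment_id,
		Retry::Attempts(3),  // retry strategy for paying the returned invoice
		None,                // no cap on total routing fees
	)
	.expect("payment_id must be unique")
	.build()
	.expect("semantically valid refund");
```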
+
        /// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
        /// to pay us.
        ///
@@ -7171,6 +7290,14 @@ where
                inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
        }
 
+       /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
+       /// node.
+       fn create_one_hop_blinded_path(&self) -> BlindedPath {
+               let entropy_source = self.entropy_source.deref();
+               let secp_ctx = &self.secp_ctx;
+               BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
+       }
+
        /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
        /// are used when constructing the phantom invoice's route hints.
        ///
@@ -7179,7 +7306,7 @@ where
                let best_block_height = self.best_block.read().unwrap().height();
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
-                       let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+                       let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        // Ensure the generated scid doesn't conflict with a real channel.
                        match short_to_chan_info.get(&scid_candidate) {
                                Some(_) => continue,
@@ -7209,7 +7336,7 @@ where
                let best_block_height = self.best_block.read().unwrap().height();
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
-                       let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
+                       let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                        // Ensure the generated scid doesn't conflict with a real channel.
                        if short_to_chan_info.contains_key(&scid_candidate) { continue }
                        return scid_candidate
@@ -7472,7 +7599,7 @@ where
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
 
-               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
        }
 }
 
@@ -7498,13 +7625,13 @@ where
                let _persistence_guard =
                        PersistenceNotifierGuard::optionally_notify_skipping_background_events(
                                self, || -> NotifyOption { NotifyOption::DoPersist });
-               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
+               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
 
                let last_best_block_height = self.best_block.read().unwrap().height();
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
-                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
                }
        }
 
@@ -7521,7 +7648,7 @@ where
                                self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
-               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
+               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
 
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@ -7641,7 +7768,7 @@ where
                                                                                msg: announcement_sigs,
                                                                        });
                                                                        if let Some(height) = height_opt {
-                                                                               if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
+                                                                               if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.chain_hash, height, &self.default_configuration) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                                                                msg: announcement,
                                                                                                // Note that announcement_signatures fails if the channel cannot be announced,
@@ -7680,10 +7807,12 @@ where
                                                                self.issue_channel_close_events(&channel.context, reason);
                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                        node_id: channel.context.get_counterparty_node_id(),
-                                                                       action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-                                                                               channel_id: channel.context.channel_id(),
-                                                                               data: reason_message,
-                                                                       } },
+                                                                       action: msgs::ErrorAction::DisconnectPeer {
+                                                                               msg: Some(msgs::ErrorMessage {
+                                                                                       channel_id: channel.context.channel_id(),
+                                                                                       data: reason_message,
+                                                                               })
+                                                                       },
                                                                });
                                                                return false;
                                                        }
@@ -8251,7 +8380,7 @@ where
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
-                                       if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
+                                       if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                                                        node_id: *counterparty_node_id,
                                                        msg,
@@ -8274,8 +8403,8 @@ where
                provided_init_features(&self.default_configuration)
        }
 
-       fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
-               Some(vec![ChainHash::from(&self.genesis_hash[..])])
+       fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
+               Some(vec![self.chain_hash])
        }
 
        fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
@@ -8820,7 +8949,7 @@ where
 
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
 
-               self.genesis_hash.write(writer)?;
+               self.chain_hash.write(writer)?;
                {
                        let best_block = self.best_block.read().unwrap();
                        best_block.height().write(writer)?;
@@ -9231,7 +9360,7 @@ where
        fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
-               let genesis_hash: BlockHash = Readable::read(reader)?;
+               let chain_hash: ChainHash = Readable::read(reader)?;
                let best_block_height: u32 = Readable::read(reader)?;
                let best_block_hash: BlockHash = Readable::read(reader)?;
 
@@ -9274,16 +9403,16 @@ where
                                                log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
-                                       let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
-                                       if batch_funding_txid.is_some() {
+                                       let mut shutdown_result = channel.context.force_shutdown(true);
+                                       if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
                                                return Err(DecodeError::InvalidValue);
                                        }
-                                       if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+                                       if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id, funding_txo, update
                                                });
                                        }
-                                       failed_htlcs.append(&mut new_failed_htlcs);
+                                       failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
                                        channel_closures.push_back((events::Event::ChannelClosed {
                                                channel_id: channel.context.channel_id(),
                                                user_channel_id: channel.context.get_user_id(),
@@ -9879,7 +10008,7 @@ where
                                                let mut outbound_scid_alias;
                                                loop {
                                                        outbound_scid_alias = fake_scid::Namespace::OutboundAlias
-                                                               .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
+                                                               .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
                                                        if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
                                                }
                                                chan.context.set_outbound_scid_alias(outbound_scid_alias);
@@ -9993,7 +10122,7 @@ where
                }
 
                let channel_manager = ChannelManager {
-                       genesis_hash,
+                       chain_hash,
                        fee_estimator: bounded_fee_estimator,
                        chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
@@ -11285,6 +11414,67 @@ mod tests {
                let payment_preimage = PaymentPreimage([42; 32]);
                assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
        }
+
+       #[test]
+       fn test_trigger_lnd_force_close() {
+               let chanmon_cfg = create_chanmon_cfgs(2);
+               let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+               let user_config = test_default_channel_config();
+               let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+               let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+               // Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
+               let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+               check_closed_broadcast(&nodes[0], 1, true);
+               check_added_monitors(&nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+               {
+                       let txn = nodes[0].tx_broadcaster.txn_broadcast();
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], funding_tx);
+               }
+
+               // Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
+               // such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
+               // their side.
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+               let channel_reestablish = get_event_msg!(
+                       nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
+               );
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &channel_reestablish);
+
+               // Alice should respond with an error since the channel isn't known, but a bogus
+               // `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
+               // close even if it was an lnd node.
+               let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 2);
+               if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
+                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                       assert_eq!(msg.next_local_commitment_number, 0);
+                       assert_eq!(msg.next_remote_commitment_number, 0);
+                       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &msg);
+               } else { panic!() };
+               check_closed_broadcast(&nodes[1], 1, true);
+               check_added_monitors(&nodes[1], 1);
+               let expected_close_reason = ClosureReason::ProcessingError {
+                       err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
+               };
+               check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
+               {
+                       let txn = nodes[1].tx_broadcaster.txn_broadcast();
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], funding_tx);
+               }
+       }
 }
 
 #[cfg(ldk_bench)]