Merge pull request #2821 from TheBlueMatt/2024-01-om-direct-export
author    valentinewallace <valentinewallace@users.noreply.github.com>
Thu, 11 Jan 2024 14:52:00 +0000 (09:52 -0500)
committer GitHub <noreply@github.com>
Thu, 11 Jan 2024 14:52:00 +0000 (09:52 -0500)
Expose `onion_message` items directly rather than via re-exports
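The mechanical effect on call sites is visible in the first hunk below: instead of pulling everything from re-exports at the `onion_message` module root, imports now name the submodules that define each item. A minimal before/after sketch:

    // Before: items imported via re-exports at the module root.
    use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};

    // After: items imported directly from their defining submodules.
    use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
    use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};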

lightning/src/ln/channelmanager.rs

index d51349728fea4ebdde5be0f364703ac7ee7df2f6,09c64df1dbf04b816195d3133e4b3f9dd411e9b1..db2e160430313b1dd3a80e12b1267ba8006bafd3
@@@ -63,7 -63,8 +63,8 @@@ use crate::offers::merkle::SignError
  use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
  use crate::offers::parse::Bolt12SemanticError;
  use crate::offers::refund::{Refund, RefundBuilder};
- use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+ use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+ use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
  use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
  use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
  use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
@@@ -550,8 -551,9 +551,8 @@@ impl Into<u16> for FailureCode 
  
  struct MsgHandleErrInternal {
        err: msgs::LightningError,
 -      chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
 +      closes_channel: bool,
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 -      channel_capacity: Option<u64>,
  }
  impl MsgHandleErrInternal {
        #[inline]
                                        },
                                },
                        },
 -                      chan_id: None,
 +                      closes_channel: false,
                        shutdown_finish: None,
 -                      channel_capacity: None,
                }
        }
        #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
 -              Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
 +              Self { err, closes_channel: false, shutdown_finish: None }
        }
        #[inline]
 -      fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
 +      fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
                let action = if shutdown_res.monitor_update.is_some() {
                        // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
                };
                Self {
                        err: LightningError { err, action },
 -                      chan_id: Some((channel_id, user_channel_id)),
 +                      closes_channel: true,
                        shutdown_finish: Some((shutdown_res, channel_update)),
 -                      channel_capacity: Some(channel_capacity)
                }
        }
        #[inline]
                                        },
                                },
                        },
 -                      chan_id: None,
 +                      closes_channel: false,
                        shutdown_finish: None,
 -                      channel_capacity: None,
                }
        }
  
        fn closes_channel(&self) -> bool {
 -              self.chan_id.is_some()
 +              self.closes_channel
        }
  }
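The `MsgHandleErrInternal` slimming above works because the per-channel metadata it used to carry (`chan_id`, `user_channel_id`, `channel_capacity`) now rides inside `ShutdownResult`. A sketch of the fields this diff reads off that struct, inferred from the uses in the hunks below rather than copied from its actual definition:

    // Inferred from usage in this diff; not the authoritative definition.
    pub(crate) struct ShutdownResult {
        pub(crate) closure_reason: ClosureReason,       // logged, and surfaced in Event::ChannelClosed
        pub(crate) counterparty_node_id: PublicKey,
        pub(crate) channel_id: ChannelId,
        pub(crate) user_channel_id: u128,
        pub(crate) channel_capacity_satoshis: u64,
        pub(crate) unbroadcasted_funding_tx: Option<Transaction>, // drives Event::DiscardFunding
        pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
        pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
        // ...plus monitor_update and any fields this diff does not touch
    }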
  
@@@ -1957,27 -1962,30 +1958,27 @@@ macro_rules! handle_error 
  
                match $internal {
                        Ok(msg) => Ok(msg),
 -                      Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
 +                      Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
                                let mut msg_events = Vec::with_capacity(2);
  
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
 +                                      let counterparty_node_id = shutdown_res.counterparty_node_id;
 +                                      let channel_id = shutdown_res.channel_id;
 +                                      let logger = WithContext::from(
 +                                              &$self.logger, Some(counterparty_node_id), Some(channel_id),
 +                                      );
 +                                      log_error!(logger, "Force-closing channel: {}", err.err);
 +
                                        $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
                                                msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
 -                                      if let Some((channel_id, user_channel_id)) = chan_id {
 -                                              $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
 -                                                      channel_id, user_channel_id,
 -                                                      reason: ClosureReason::ProcessingError { err: err.err.clone() },
 -                                                      counterparty_node_id: Some($counterparty_node_id),
 -                                                      channel_capacity_sats: channel_capacity,
 -                                              }, None));
 -                                      }
 +                              } else {
 +                                      log_error!($self.logger, "Got non-closing error: {}", err.err);
                                }
  
 -                              let logger = WithContext::from(
 -                                      &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
 -                              );
 -                              log_error!(logger, "{}", err.err);
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
                                        msg_events.push(events::MessageSendEvent::HandleError {
@@@ -2037,11 -2045,12 +2038,11 @@@ macro_rules! convert_chan_phase_err 
                                let logger = WithChannelContext::from(&$self.logger, &$channel.context);
                                log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
                                update_maps_on_chan_removal!($self, $channel.context);
 -                              let shutdown_res = $channel.context.force_shutdown(true);
 -                              let user_id = $channel.context.get_user_id();
 -                              let channel_capacity_satoshis = $channel.context.get_value_satoshis();
 -
 -                              (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
 -                                      shutdown_res, $channel_update, channel_capacity_satoshis))
 +                              let reason = ClosureReason::ProcessingError { err: msg.clone() };
 +                              let shutdown_res = $channel.context.force_shutdown(true, reason);
 +                              let err =
 +                                      MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
 +                              (true, err)
                        },
                }
        };
@@@ -2698,6 -2707,26 +2699,6 @@@ wher
                        .collect()
        }
  
 -      /// Helper function that issues the channel close events
 -      fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
 -              let mut pending_events_lock = self.pending_events.lock().unwrap();
 -              match context.unbroadcasted_funding() {
 -                      Some(transaction) => {
 -                              pending_events_lock.push_back((events::Event::DiscardFunding {
 -                                      channel_id: context.channel_id(), transaction
 -                              }, None));
 -                      },
 -                      None => {},
 -              }
 -              pending_events_lock.push_back((events::Event::ChannelClosed {
 -                      channel_id: context.channel_id(),
 -                      user_channel_id: context.get_user_id(),
 -                      reason: closure_reason,
 -                      counterparty_node_id: Some(context.get_counterparty_node_id()),
 -                      channel_capacity_sats: Some(context.get_value_satoshis()),
 -              }, None));
 -      }
 -
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                }
                                        } else {
 -                                              self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
                                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
 -                                              shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
 +                                              shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
                let logger = WithContext::from(
                        &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
                );
 -              log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
 +
 +              log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
 +                      shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
                for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
                                                update_maps_on_chan_removal!(self, &chan.context());
 -                                              self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
 -                                              shutdown_results.push(chan.context_mut().force_shutdown(false));
 +                                              shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
                                        }
                                }
                                has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
                                "Closing a batch where all channels have completed initial monitor update",
                        );
                }
 +
 +              {
 +                      let mut pending_events = self.pending_events.lock().unwrap();
 +                      pending_events.push_back((events::Event::ChannelClosed {
 +                              channel_id: shutdown_res.channel_id,
 +                              user_channel_id: shutdown_res.user_channel_id,
 +                              reason: shutdown_res.closure_reason,
 +                              counterparty_node_id: Some(shutdown_res.counterparty_node_id),
 +                              channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
 +                      }, None));
 +
 +                      if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
 +                              pending_events.push_back((events::Event::DiscardFunding {
 +                                      channel_id: shutdown_res.channel_id, transaction
 +                              }, None));
 +                      }
 +              }
                for shutdown_result in shutdown_results.drain(..) {
                        self.finish_close_channel(shutdown_result);
                }
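With the event pushes above, close-event generation is consolidated: call sites no longer pair `issue_channel_close_events` with `force_shutdown`. The `ClosureReason` is threaded through `force_shutdown` into the `ShutdownResult`, and `finish_close_channel` emits `Event::ChannelClosed` (plus `Event::DiscardFunding` when the funding transaction was never broadcast) exactly once. Schematically, for any close path:

    // Before (per call site):
    //   self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
    //   self.finish_close_channel(chan.context.force_shutdown(broadcast));
    // After: the reason travels inside the ShutdownResult and the events are
    // pushed centrally by finish_close_channel.
    let shutdown_res = chan.context.force_shutdown(broadcast, ClosureReason::HolderForceClosed);
    self.finish_close_channel(shutdown_res);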
                        let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
                        if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
                                log_error!(logger, "Force-closing channel {}", channel_id);
 -                              self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
                                mem::drop(peer_state);
                                mem::drop(per_peer_state);
                                match chan_phase {
                                        ChannelPhase::Funded(mut chan) => {
 -                                              self.finish_close_channel(chan.context.force_shutdown(broadcast));
 +                                              self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
                                                (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
                                        },
                                        ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
 -                                              self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
 +                                              self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let funding_txo;
 -              let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
 +              let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
                        Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
                                funding_txo = find_funding_output(&chan, &funding_transaction)?;
  
                                let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
 -                                              let user_id = chan.context.get_user_id();
 -                                              let shutdown_res = chan.context.force_shutdown(false);
 -                                              let channel_capacity = chan.context.get_value_satoshis();
 -                                              (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
 +                                              let reason = ClosureReason::ProcessingError { err: msg.clone() };
 +                                              let shutdown_res = chan.context.force_shutdown(false, reason);
 +                                              (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
                                        } else { unreachable!(); });
                                match funding_res {
                                        Ok(funding_msg) => (chan, funding_msg),
                        },
                        hash_map::Entry::Vacant(e) => {
                                let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
 -                              if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
 -                                      panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
 +                              match outpoint_to_peer.entry(funding_txo) {
 +                                      hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
 +                                      hash_map::Entry::Occupied(o) => {
 +                                              let err = format!(
 +                                                      "An existing channel using outpoint {} is open with peer {}",
 +                                                      funding_txo, o.get()
 +                                              );
 +                                              mem::drop(outpoint_to_peer);
 +                                              mem::drop(peer_state_lock);
 +                                              mem::drop(per_peer_state);
 +                                              let reason = ClosureReason::ProcessingError { err: err.clone() };
 +                                              self.finish_close_channel(chan.context.force_shutdown(true, reason));
 +                                              return Err(APIError::ChannelUnavailable { err });
 +                                      }
                                }
                                e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
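One behavior change in the hunk above deserves a note: a duplicate funding outpoint used to hit the `panic!`, but is now unwound gracefully (the locks are dropped and the new channel force-closed) and reported to the caller as `APIError::ChannelUnavailable`. A hedged caller-side sketch, assuming this code sits on the `funding_transaction_generated` path as the surrounding context suggests:

    // Illustrative only: how the new error surfaces at the public API boundary.
    match channel_manager.funding_transaction_generated(
        &temporary_channel_id, &counterparty_node_id, funding_tx,
    ) {
        Err(APIError::ChannelUnavailable { err }) => {
            // e.g. "An existing channel using outpoint <outpoint> is open with peer <pubkey>"
            eprintln!("funding rejected, channel closed: {}", err);
        },
        res => res.expect("unexpected error from funding_transaction_generated"),
    }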
                                                .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
                                                .map(|mut chan| {
                                                        update_maps_on_chan_removal!(self, &chan.context());
 -                                                      self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
 -                                                      shutdown_results.push(chan.context_mut().force_shutdown(false));
 +                                                      let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
 +                                                      shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
                                                });
                                }
                        }
                                        if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
                                                let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                for forward_info in pending_forwards.drain(..) {
 -                                                      match forward_info {
 +                                                      let queue_fail_htlc_res = match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                        prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                        forward_info: PendingHTLCInfo {
                                                                                ));
                                                                                continue;
                                                                        }
 +                                                                      None
                                                                },
                                                                HTLCForwardInfo::AddHTLC { .. } => {
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
                                                                        log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
 -                                                                      if let Err(e) = chan.queue_fail_htlc(
 -                                                                              htlc_id, err_packet, &&logger
 -                                                                      ) {
 -                                                                              if let ChannelError::Ignore(msg) = e {
 -                                                                                      log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
 -                                                                              } else {
 -                                                                                      panic!("Stated return value requirements in queue_fail_htlc() were not met");
 -                                                                              }
 -                                                                              // fail-backs are best-effort, we probably already have one
 -                                                                              // pending, and if not that's OK, if not, the channel is on
 -                                                                              // the chain and sending the HTLC-Timeout is their problem.
 -                                                                              continue;
 -                                                                      }
 +                                                                      Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
                                                                },
                                                                HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
 -                                                                      log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
 -                                                                      if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
 -                                                                              if let ChannelError::Ignore(msg) = e {
 -                                                                                      log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
 -                                                                              } else {
 -                                                                                      panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
 -                                                                              }
 -                                                                              // fail-backs are best-effort, we probably already have one
 -                                                                              // pending, and if not that's OK, if not, the channel is on
 -                                                                              // the chain and sending the HTLC-Timeout is their problem.
 -                                                                              continue;
 -                                                                      }
 +                                                                      log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
 +                                                                      let res = chan.queue_fail_malformed_htlc(
 +                                                                              htlc_id, failure_code, sha256_of_onion, &&logger
 +                                                                      );
 +                                                                      Some((res, htlc_id))
                                                                },
 +                                                      };
 +                                                      if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
 +                                                              if let Err(e) = queue_fail_htlc_res {
 +                                                                      if let ChannelError::Ignore(msg) = e {
 +                                                                              log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
 +                                                                      } else {
 +                                                                              panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
 +                                                                      }
 +                                                                      // fail-backs are best-effort, we probably already have one
 +                                                                      // pending, and if not that's OK. If one can't be queued, the
 +                                                                      // channel is on chain and sending the HTLC-Timeout is their problem.
 +                                                                      continue;
 +                                                              }
                                                        }
                                                }
                                        } else {
                                        log_error!(logger,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
 -                                      self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
 -                                      shutdown_channels.push(context.force_shutdown(false));
 +                                      shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                node_id: counterparty_node_id,
                                                action: msgs::ErrorAction::SendErrorMessage {
                                        let res =
                                                chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
                                        match res {
 -                                              Ok((chan, monitor)) => {
 +                                              Ok((mut chan, monitor)) => {
                                                        if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
                                                                // We really should be able to insert here without doing a second
                                                                // lookup, but sadly rust stdlib doesn't currently allow keeping
                                                                Ok(())
                                                        } else {
                                                                let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
 +                                                              // We weren't able to watch the channel to begin with, so no
 +                                                              // updates should be made on it. Previously, full_stack_target
 +                                                              // found an (unreachable) panic when the monitor update contained
 +                                                              // within `shutdown_finish` was applied.
 +                                                              chan.unset_funding_info(msg.channel_id);
                                                                return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
                                                        }
                                                },
                                                let context = phase.context_mut();
                                                let logger = WithChannelContext::from(&self.logger, context);
                                                log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
 -                                              self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
 -                                              finish_shutdown = Some(chan.context_mut().force_shutdown(false));
 +                                              finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
                                        },
                                }
                        } else {
                                        msg: update
                                });
                        }
 -                      self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
                }
                mem::drop(per_peer_state);
                if let Some(shutdown_result) = shutdown_result {
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
 -                                                                              failed_channels.push(chan.context.force_shutdown(false));
 +                                                                              failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
 -                                                                              self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::DisconnectPeer {
                                                                                        });
                                                                                }
  
 -                                                                              self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
 -
                                                                                log_info!(logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
@@@ -8466,13 -8474,14 +8467,13 @@@ wher
                                                                update_maps_on_chan_removal!(self, &channel.context);
                                                                // It looks like our counterparty went on-chain or funding transaction was
                                                                // reorged out of the main chain. Close the channel.
 -                                                              failed_channels.push(channel.context.force_shutdown(true));
 +                                                              let reason_message = format!("{}", reason);
 +                                                              failed_channels.push(channel.context.force_shutdown(true, reason));
                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
                                                                        });
                                                                }
 -                                                              let reason_message = format!("{}", reason);
 -                                                              self.issue_channel_close_events(&channel.context, reason);
                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                        node_id: channel.context.get_counterparty_node_id(),
                                                                        action: msgs::ErrorAction::DisconnectPeer {
@@@ -8870,7 -8879,8 +8871,7 @@@ wher
                                        };
                                        // Clean up for removal.
                                        update_maps_on_chan_removal!(self, &context);
 -                                      self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
 -                                      failed_channels.push(context.force_shutdown(false));
 +                                      failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
                                        false
                                });
                                // Note that we don't bother generating any events for pre-accept channels -
@@@ -10310,7 -10320,7 +10311,7 @@@ wher
                                                log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
 -                                      let mut shutdown_result = channel.context.force_shutdown(true);
 +                                      let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
                                        if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
                                                return Err(DecodeError::InvalidValue);
                                        }
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
                                // was in-progress, we never broadcasted the funding transaction and can still
                                // safely discard the channel.
 -                              let _ = channel.context.force_shutdown(false);
 +                              let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
                                channel_closures.push_back((events::Event::ChannelClosed {
                                        channel_id: channel.context.channel_id(),
                                        user_channel_id: channel.context.get_user_id(),