Merge pull request #2809 from TheBlueMatt/2023-12-closing-event-cleanup-fixes
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 10 Jan 2024 22:37:07 +0000 (22:37 +0000)
committer GitHub <noreply@github.com>
Wed, 10 Jan 2024 22:37:07 +0000 (22:37 +0000)
Clean Up Funding Error Handling and Shutdown

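The change threads closure metadata through ShutdownResult instead of the removed issue_channel_close_events helper: ChannelContext::force_shutdown now takes a ClosureReason, and ChannelManager::finish_close_channel generates the ChannelClosed / DiscardFunding events from the fields the result carries. Below is a minimal, self-contained sketch of that flow; the types are simplified stand-ins for the pub(crate) definitions shown further down, keeping only the fields that feed event generation.

    // Simplified stand-ins for the pub(crate) types changed in this diff; only the fields
    // that feed event generation are kept.
    #[derive(Debug)]
    enum ClosureReason { HolderForceClosed, CooperativeClosure, ProcessingError { err: String } }

    struct ShutdownResult {
        closure_reason: ClosureReason,
        channel_id: [u8; 32],
        user_channel_id: u128,
        channel_capacity_satoshis: u64,
        unbroadcasted_funding_tx: Option<Vec<u8>>, // raw tx bytes stand in for bitcoin::Transaction
    }

    #[derive(Debug)]
    enum Event {
        ChannelClosed { channel_id: [u8; 32], user_channel_id: u128, reason: ClosureReason, channel_capacity_sats: Option<u64> },
        DiscardFunding { channel_id: [u8; 32], transaction: Vec<u8> },
    }

    // With the metadata on ShutdownResult, callers of force_shutdown() can no longer forget
    // (or double-emit) the closure events: finish_close_channel() derives them itself.
    fn finish_close_channel(shutdown_res: ShutdownResult, pending_events: &mut Vec<Event>) {
        pending_events.push(Event::ChannelClosed {
            channel_id: shutdown_res.channel_id,
            user_channel_id: shutdown_res.user_channel_id,
            reason: shutdown_res.closure_reason,
            channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
        });
        if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
            pending_events.push(Event::DiscardFunding { channel_id: shutdown_res.channel_id, transaction });
        }
    }

    fn main() {
        let mut pending_events = Vec::new();
        finish_close_channel(ShutdownResult {
            closure_reason: ClosureReason::HolderForceClosed,
            channel_id: [0u8; 32],
            user_channel_id: 42,
            channel_capacity_satoshis: 1_000_000,
            unbroadcasted_funding_tx: None,
        }, &mut pending_events);
        assert_eq!(pending_events.len(), 1); // no DiscardFunding since the funding tx was broadcast
    }

The real ShutdownResult also carries monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid and the counterparty node id, as the diff below shows.
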
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs

index cd98418c5204d369759637fc88667b6d4255dca5,7026ab6d3766f04cad24c31f429f3626077ad8d8..cddb63fd775bb6026aafb1fa008d13bb74fee92a
@@@ -732,8 -732,8 +732,8 @@@ struct CommitmentStats<'a> 
        total_fee_sat: u64, // the total fee included in the transaction
        num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
        htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
 -      local_balance_msat: u64, // local balance before fees but considering dust limits
 -      remote_balance_msat: u64, // remote balance before fees but considering dust limits
 +      local_balance_msat: u64, // local balance before fees *not* considering dust limits
 +      remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
        outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
        inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
  }
@@@ -814,6 -814,7 +814,7 @@@ pub(super) struct ReestablishResponses 
  /// The result of a shutdown that should be handled.
  #[must_use]
  pub(crate) struct ShutdownResult {
+       pub(crate) closure_reason: ClosureReason,
        /// A channel monitor update to apply.
        pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
        /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
        /// propagated to the remainder of the batch.
        pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
        pub(crate) channel_id: ChannelId,
+       pub(crate) user_channel_id: u128,
+       pub(crate) channel_capacity_satoshis: u64,
        pub(crate) counterparty_node_id: PublicKey,
+       pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
  }
  
  /// If the majority of the channel's funds are to the fundee and the initiator holds only just
@@@ -1728,13 -1732,13 +1732,13 @@@ impl<SP: Deref> ChannelContext<SP> wher
                        }
                }
  
 -              let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
 +              let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
                assert!(value_to_self_msat >= 0);
                // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
                // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
                // "violate" their reserve value by couting those against it. Thus, we have to convert
                // everything to i64 before subtracting as otherwise we can overflow.
 -              let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
 +              let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
                assert!(value_to_remote_msat >= 0);
  
                #[cfg(debug_assertions)]
                htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
                htlcs_included.append(&mut included_dust_htlcs);
  
 -              // For the stats, trimmed-to-0 the value in msats accordingly
 -              value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
 -              value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
 -
                CommitmentStats {
                        tx,
                        feerate_per_kw,
        /// will sign and send to our counterparty.
        /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
        fn build_remote_transaction_keys(&self) -> TxCreationKeys {
 -              //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
 -              //may see payments to it!
                let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
                let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
                let counterparty_pubkeys = self.get_counterparty_pubkeys();
                if let Some(feerate) = outbound_feerate_update {
                        feerate_per_kw = cmp::max(feerate_per_kw, feerate);
                }
 -              cmp::max(2530, feerate_per_kw * 1250 / 1000)
 +              let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
 +              cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
        }
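
The replacement above computes the 125%-of-feerate bound with checked_mul, so a pathological feerate_per_kw can no longer overflow the u32 math (it saturates to u32::MAX instead of panicking in debug builds or wrapping in release builds). A standalone sketch of the same arithmetic, with a made-up function name standing in for the private helper (which also folds in any pending outbound feerate update first):

    use std::cmp;

    fn feerate_with_25pct_buffer(feerate_per_kw: u32) -> u32 {
        // 125% of the current feerate, floored at 2530 sat/kW; overflow saturates to u32::MAX.
        let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
        cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::MAX))
    }

    fn main() {
        assert_eq!(feerate_with_25pct_buffer(1_000), 2_530);       // low feerates hit the 2530 floor
        assert_eq!(feerate_with_25pct_buffer(10_000), 12_500);     // otherwise 125% of the input
        assert_eq!(feerate_with_25pct_buffer(u32::MAX), u32::MAX); // unchecked math would wrap here
    }
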
  
        /// Get forwarding information for the counterparty.
                res
        }
  
-       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
-               where F: Fn() -> Option<O> {
+       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
                match self.channel_state {
                        ChannelState::FundingNegotiated => f(),
-                       ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
-                               f()
-                       } else {
-                               None
-                       },
+                       ChannelState::AwaitingChannelReady(flags) =>
+                               if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
+                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
+                               {
+                                       f()
+                               } else {
+                                       None
+                               },
                        _ => None,
                }
        }
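
The match above broadens when a channel's funding is treated as still unbroadcast: besides FundingNegotiated and the batch-funding wait, an AwaitingChannelReady channel whose initial monitor update is still in flight now also qualifies (presumably because the funding transaction is not broadcast until that update completes), which lets the new unbroadcasted_funding_tx field of ShutdownResult be populated in that case. A minimal sketch with plain bitflags standing in for AwaitingChannelReadyFlags / FundedStateFlags (the flag values are illustrative, not LDK's):

    const WAITING_FOR_BATCH: u32 = 1 << 0;
    const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 1;

    enum ChannelState { FundingNegotiated, AwaitingChannelReady(u32), ChannelReady }

    // Mirrors the broadened check: funding counts as unbroadcast while still negotiating, while
    // waiting on the rest of a batch, or while the initial monitor update is in progress.
    fn funding_still_unbroadcast(state: &ChannelState) -> bool {
        match state {
            ChannelState::FundingNegotiated => true,
            ChannelState::AwaitingChannelReady(flags) =>
                flags & (WAITING_FOR_BATCH | MONITOR_UPDATE_IN_PROGRESS) != 0,
            ChannelState::ChannelReady => false,
        }
    }

    fn main() {
        assert!(funding_still_unbroadcast(&ChannelState::FundingNegotiated));
        assert!(funding_still_unbroadcast(&ChannelState::AwaitingChannelReady(MONITOR_UPDATE_IN_PROGRESS)));
        assert!(!funding_still_unbroadcast(&ChannelState::AwaitingChannelReady(0)));
        assert!(!funding_still_unbroadcast(&ChannelState::ChannelReady));
    }
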
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
        /// Also returns the list of payment_hashes for channels which we can safely fail backwards
        /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+       pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
                // Note that we MUST only generate a monitor update that indicates force-closure - we're
                // called during initialization prior to the chain_monitor in the encompassing ChannelManager
                // being fully configured in some cases. Thus, it's likely any monitor events we generate will
                        } else { None }
                } else { None };
                let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
+               let unbroadcasted_funding_tx = self.unbroadcasted_funding();
  
                self.channel_state = ChannelState::ShutdownComplete;
                self.update_time_counter += 1;
                ShutdownResult {
+                       closure_reason,
                        monitor_update,
                        dropped_outbound_htlcs,
                        unbroadcasted_batch_funding_txid,
                        channel_id: self.channel_id,
+                       user_channel_id: self.user_id,
+                       channel_capacity_satoshis: self.channel_value_satoshis,
                        counterparty_node_id: self.counterparty_node_id,
+                       unbroadcasted_funding_tx,
                }
        }
  
@@@ -4931,11 -4947,15 +4942,15 @@@ impl<SP: Deref> Channel<SP> wher
                if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
                                let shutdown_result = ShutdownResult {
+                                       closure_reason: ClosureReason::CooperativeClosure,
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                                        channel_id: self.context.channel_id,
+                                       user_channel_id: self.context.user_id,
+                                       channel_capacity_satoshis: self.context.channel_value_satoshis,
                                        counterparty_node_id: self.context.counterparty_node_id,
+                                       unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
                                };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
                                self.context.channel_state = ChannelState::ShutdownComplete;
                                                        .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
+                                                               closure_reason: ClosureReason::CooperativeClosure,
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                                                                channel_id: self.context.channel_id,
+                                                               user_channel_id: self.context.user_id,
+                                                               channel_capacity_satoshis: self.context.channel_value_satoshis,
                                                                counterparty_node_id: self.context.counterparty_node_id,
+                                                               unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
                                                        };
                                                        self.context.channel_state = ChannelState::ShutdownComplete;
                                                        self.context.update_time_counter += 1;
@@@ -6843,41 -6867,6 +6862,41 @@@ pub(super) struct InboundV1Channel<SP: 
        pub unfunded_context: UnfundedChannelContext,
  }
  
 +/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
 +/// [`msgs::OpenChannel`].
 +pub(super) fn channel_type_from_open_channel(
 +      msg: &msgs::OpenChannel, their_features: &InitFeatures,
 +      our_supported_features: &ChannelTypeFeatures
 +) -> Result<ChannelTypeFeatures, ChannelError> {
 +      if let Some(channel_type) = &msg.channel_type {
 +              if channel_type.supports_any_optional_bits() {
 +                      return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
 +              }
 +
 +              // We only support the channel types defined by the `ChannelManager` in
 +              // `provided_channel_type_features`. The channel type must always support
 +              // `static_remote_key`.
 +              if !channel_type.requires_static_remote_key() {
 +                      return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
 +              }
 +              // Make sure we support all of the features behind the channel type.
 +              if !channel_type.is_subset(our_supported_features) {
 +                      return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
 +              }
 +              let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
 +              if channel_type.requires_scid_privacy() && announced_channel {
 +                      return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
 +              }
 +              Ok(channel_type.clone())
 +      } else {
 +              let channel_type = ChannelTypeFeatures::from_init(&their_features);
 +              if channel_type != ChannelTypeFeatures::only_static_remote_key() {
 +                      return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
 +              }
 +              Ok(channel_type)
 +      }
 +}
 +
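Pulling this negotiation into a free function lets ChannelManager::internal_open_channel (later in this diff) derive the channel_type for the OpenChannelRequest event via the same rules instead of msg.channel_type.clone().unwrap(), which could panic if a peer omitted the optional channel_type TLV. One detail worth a tiny standalone illustration is the announced_channel check, which reads bit 0 (announce_channel per BOLT 2) of open_channel.channel_flags:

    // Bit 0 of open_channel.channel_flags is announce_channel (BOLT 2). The helper above rejects
    // a channel type requiring scid_privacy when the opener also asks for the channel to be
    // announced, since a publicly announced SCID defeats the point of the alias.
    fn main() {
        let channel_flags: u8 = 0b0000_0001;              // announce_channel set by the opener
        let announced_channel = (channel_flags & 1) == 1;
        let requires_scid_privacy = true;                 // pretend the proposed type set scid_privacy
        if requires_scid_privacy && announced_channel {
            println!("reject: SCID Alias/Privacy Channel Type cannot be set on a public channel");
        }
    }
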
  impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Creates a new channel from a remote sides' request for one.
        /// Assumes chain_hash has already been checked and corresponds with what we expect!
  
                // First check the channel type is known, failing before we do anything else if we don't
                // support this channel type.
 -              let channel_type = if let Some(channel_type) = &msg.channel_type {
 -                      if channel_type.supports_any_optional_bits() {
 -                              return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
 -                      }
 -
 -                      // We only support the channel types defined by the `ChannelManager` in
 -                      // `provided_channel_type_features`. The channel type must always support
 -                      // `static_remote_key`.
 -                      if !channel_type.requires_static_remote_key() {
 -                              return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
 -                      }
 -                      // Make sure we support all of the features behind the channel type.
 -                      if !channel_type.is_subset(our_supported_features) {
 -                              return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
 -                      }
 -                      if channel_type.requires_scid_privacy() && announced_channel {
 -                              return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
 -                      }
 -                      channel_type.clone()
 -              } else {
 -                      let channel_type = ChannelTypeFeatures::from_init(&their_features);
 -                      if channel_type != ChannelTypeFeatures::only_static_remote_key() {
 -                              return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
 -                      }
 -                      channel_type
 -              };
 +              let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
  
                let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
                let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
index e04a3cd1a34bdfda679c677e3a09518be526132d,81b9d766d7b39a3e91b633e62ea72b94e1eab5bb..a9bcbd286315cdc10e0528c2fe949a162afc863a
@@@ -43,11 -43,13 +43,11 @@@ use crate::events::{Event, EventHandler
  // Since this struct is returned in `list_channels` methods, expose it here in case users want to
  // construct one themselves.
  use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 -use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 +use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
  use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
  #[cfg(any(feature = "_test_utils", test))]
  use crate::ln::features::Bolt11InvoiceFeatures;
 -use crate::routing::gossip::NetworkGraph;
 -use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
 -use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 +use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
  use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
  use crate::ln::msgs;
  use crate::ln::onion_utils;
@@@ -64,7 -66,7 +64,7 @@@ use crate::offers::offer::{DerivedMetad
  use crate::offers::parse::Bolt12SemanticError;
  use crate::offers::refund::{Refund, RefundBuilder};
  use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
 -use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
 +use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
  use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
  use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
  use crate::util::wakers::{Future, Notifier};
@@@ -73,13 -75,6 +73,13 @@@ use crate::util::string::UntrustedStrin
  use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
  use crate::util::logger::{Level, Logger, WithContext};
  use crate::util::errors::APIError;
 +#[cfg(not(c_bindings))]
 +use {
 +      crate::routing::router::DefaultRouter,
 +      crate::routing::gossip::NetworkGraph,
 +      crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
 +      crate::sign::KeysManager,
 +};
  
  use alloc::collections::{btree_map, BTreeMap};
  
@@@ -550,9 -545,8 +550,8 @@@ impl Into<u16> for FailureCode 
  
  struct MsgHandleErrInternal {
        err: msgs::LightningError,
-       chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
+       closes_channel: bool,
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
-       channel_capacity: Option<u64>,
  }
  impl MsgHandleErrInternal {
        #[inline]
                                        },
                                },
                        },
-                       chan_id: None,
+                       closes_channel: false,
                        shutdown_finish: None,
-                       channel_capacity: None,
                }
        }
        #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
-               Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
+               Self { err, closes_channel: false, shutdown_finish: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
                let action = if shutdown_res.monitor_update.is_some() {
                        // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
                };
                Self {
                        err: LightningError { err, action },
-                       chan_id: Some((channel_id, user_channel_id)),
+                       closes_channel: true,
                        shutdown_finish: Some((shutdown_res, channel_update)),
-                       channel_capacity: Some(channel_capacity)
                }
        }
        #[inline]
                                        },
                                },
                        },
-                       chan_id: None,
+                       closes_channel: false,
                        shutdown_finish: None,
-                       channel_capacity: None,
                }
        }
  
        fn closes_channel(&self) -> bool {
-               self.chan_id.is_some()
+               self.closes_channel
        }
  }
  
@@@ -1961,30 -1952,27 +1957,27 @@@ macro_rules! handle_error 
  
                match $internal {
                        Ok(msg) => Ok(msg),
-                       Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
+                       Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
                                let mut msg_events = Vec::with_capacity(2);
  
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
+                                       let counterparty_node_id = shutdown_res.counterparty_node_id;
+                                       let channel_id = shutdown_res.channel_id;
+                                       let logger = WithContext::from(
+                                               &$self.logger, Some(counterparty_node_id), Some(channel_id),
+                                       );
+                                       log_error!(logger, "Force-closing channel: {}", err.err);
                                        $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
                                                msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
-                                       if let Some((channel_id, user_channel_id)) = chan_id {
-                                               $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
-                                                       channel_id, user_channel_id,
-                                                       reason: ClosureReason::ProcessingError { err: err.err.clone() },
-                                                       counterparty_node_id: Some($counterparty_node_id),
-                                                       channel_capacity_sats: channel_capacity,
-                                               }, None));
-                                       }
+                               } else {
+                                       log_error!($self.logger, "Got non-closing error: {}", err.err);
                                }
  
-                               let logger = WithContext::from(
-                                       &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
-                               );
-                               log_error!(logger, "{}", err.err);
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
                                        msg_events.push(events::MessageSendEvent::HandleError {
@@@ -2044,12 -2032,11 +2037,11 @@@ macro_rules! convert_chan_phase_err 
                                let logger = WithChannelContext::from(&$self.logger, &$channel.context);
                                log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
                                update_maps_on_chan_removal!($self, $channel.context);
-                               let shutdown_res = $channel.context.force_shutdown(true);
-                               let user_id = $channel.context.get_user_id();
-                               let channel_capacity_satoshis = $channel.context.get_value_satoshis();
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
-                                       shutdown_res, $channel_update, channel_capacity_satoshis))
+                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
+                               let shutdown_res = $channel.context.force_shutdown(true, reason);
+                               let err =
+                                       MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
+                               (true, err)
                        },
                }
        };
@@@ -2706,26 -2693,6 +2698,6 @@@ wher
                        .collect()
        }
  
-       /// Helper function that issues the channel close events
-       fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
-               let mut pending_events_lock = self.pending_events.lock().unwrap();
-               match context.unbroadcasted_funding() {
-                       Some(transaction) => {
-                               pending_events_lock.push_back((events::Event::DiscardFunding {
-                                       channel_id: context.channel_id(), transaction
-                               }, None));
-                       },
-                       None => {},
-               }
-               pending_events_lock.push_back((events::Event::ChannelClosed {
-                       channel_id: context.channel_id(),
-                       user_channel_id: context.get_user_id(),
-                       reason: closure_reason,
-                       counterparty_node_id: Some(context.get_counterparty_node_id()),
-                       channel_capacity_sats: Some(context.get_value_satoshis()),
-               }, None));
-       }
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                }
                                        } else {
-                                               self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
                                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
-                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
+                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
                let logger = WithContext::from(
                        &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
                );
-               log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+               log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
+                       shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
                for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
                                                update_maps_on_chan_removal!(self, &chan.context());
-                                               self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
-                                               shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                               shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
                                        }
                                }
                                has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
                                "Closing a batch where all channels have completed initial monitor update",
                        );
                }
+               {
+                       let mut pending_events = self.pending_events.lock().unwrap();
+                       pending_events.push_back((events::Event::ChannelClosed {
+                               channel_id: shutdown_res.channel_id,
+                               user_channel_id: shutdown_res.user_channel_id,
+                               reason: shutdown_res.closure_reason,
+                               counterparty_node_id: Some(shutdown_res.counterparty_node_id),
+                               channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+                       }, None));
+                       if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
+                               pending_events.push_back((events::Event::DiscardFunding {
+                                       channel_id: shutdown_res.channel_id, transaction
+                               }, None));
+                       }
+               }
                for shutdown_result in shutdown_results.drain(..) {
                        self.finish_close_channel(shutdown_result);
                }
                        let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
                        if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
                                log_error!(logger, "Force-closing channel {}", channel_id);
-                               self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
                                mem::drop(peer_state);
                                mem::drop(per_peer_state);
                                match chan_phase {
                                        ChannelPhase::Funded(mut chan) => {
-                                               self.finish_close_channel(chan.context.force_shutdown(broadcast));
+                                               self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
                                                (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
                                        },
                                        ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
-                                               self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
+                                               self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let funding_txo;
-               let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+               let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
                        Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
                                funding_txo = find_funding_output(&chan, &funding_transaction)?;
  
                                let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
-                                               let user_id = chan.context.get_user_id();
-                                               let shutdown_res = chan.context.force_shutdown(false);
-                                               let channel_capacity = chan.context.get_value_satoshis();
-                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
+                                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
+                                               let shutdown_res = chan.context.force_shutdown(false, reason);
+                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
                                        } else { unreachable!(); });
                                match funding_res {
                                        Ok(funding_msg) => (chan, funding_msg),
                        },
                        hash_map::Entry::Vacant(e) => {
                                let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-                               if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
-                                       panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
+                               match outpoint_to_peer.entry(funding_txo) {
+                                       hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
+                                       hash_map::Entry::Occupied(o) => {
+                                               let err = format!(
+                                                       "An existing channel using outpoint {} is open with peer {}",
+                                                       funding_txo, o.get()
+                                               );
+                                               mem::drop(outpoint_to_peer);
+                                               mem::drop(peer_state_lock);
+                                               mem::drop(per_peer_state);
+                                               let reason = ClosureReason::ProcessingError { err: err.clone() };
+                                               self.finish_close_channel(chan.context.force_shutdown(true, reason));
+                                               return Err(APIError::ChannelUnavailable { err });
+                                       }
                                }
                                e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
                                                .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
                                                .map(|mut chan| {
                                                        update_maps_on_chan_removal!(self, &chan.context());
-                                                       self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
-                                                       shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                                       let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
+                                                       shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
                                                });
                                }
                        }
                                        log_error!(logger,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
-                                       self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
-                                       shutdown_channels.push(context.force_shutdown(false));
+                                       shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                node_id: counterparty_node_id,
                                                action: msgs::ErrorAction::SendErrorMessage {
        }
  
        fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
 +
 +              let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
                let peers_without_funded_channels =
                        self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 -                      .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 +              .ok_or_else(|| {
 +                      let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id); 
 +                      log_error!(logger, "{}", err_str);
 +
 +                      APIError::ChannelUnavailable { err: err_str } 
 +              })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let is_only_peer_channel = peer_state.total_channel_count() == 1;
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
 -                                      &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
 +                                      &self.logger, accept_0conf).map_err(|e| {
 +                                              let err_str = e.to_string();
 +                                              log_error!(logger, "{}", err_str);
 +
 +                                              APIError::ChannelUnavailable { err: err_str }
 +                                      })
 +                              }
 +                      _ => { 
 +                              let err_str = "No such channel awaiting to be accepted.".to_owned();
 +                              log_error!(logger, "{}", err_str);
 +
 +                              Err(APIError::APIMisuseError { err: err_str })
                        }
 -                      _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
                }?;
  
                if accept_0conf {
                                }
                        };
                        peer_state.pending_msg_events.push(send_msg_err_event);
 -                      return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
 +                      let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
 +                      log_error!(logger, "{}", err_str);
 +
 +                      return Err(APIError::APIMisuseError { err: err_str });
                } else {
                        // If this peer already has some channels, a new channel won't increase our number of peers
                        // with unfunded channels, so as long as we aren't over the maximum number of unfunded
                                        }
                                };
                                peer_state.pending_msg_events.push(send_msg_err_event);
 -                              return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
 +                              let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
 +                              log_error!(logger, "{}", err_str);
 +
 +                              return Err(APIError::APIMisuseError { err: err_str });
                        }
                }
  
  
                // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
                if self.default_configuration.manually_accept_inbound_channels {
 +                      let channel_type = channel::channel_type_from_open_channel(
 +                                      &msg, &peer_state.latest_features, &self.channel_type_features()
 +                              ).map_err(|e|
 +                                      MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
 +                              )?;
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push_back((events::Event::OpenChannelRequest {
                                temporary_channel_id: msg.temporary_channel_id.clone(),
                                counterparty_node_id: counterparty_node_id.clone(),
                                funding_satoshis: msg.funding_satoshis,
                                push_msat: msg.push_msat,
 -                              channel_type: msg.channel_type.clone().unwrap(),
 +                              channel_type,
                        }, None));
                        peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
                                open_channel_msg: msg.clone(),
                                        let res =
                                                chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
                                        match res {
-                                               Ok((chan, monitor)) => {
+                                               Ok((mut chan, monitor)) => {
                                                        if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
                                                                // We really should be able to insert here without doing a second
                                                                // lookup, but sadly rust stdlib doesn't currently allow keeping
                                                                Ok(())
                                                        } else {
                                                                let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+                                                               // We weren't able to watch the channel to begin with, so no
+                                                               // updates should be made on it. Previously, full_stack_target
+                                                               // found an (unreachable) panic when the monitor update contained
+                                                               // within `shutdown_finish` was applied.
+                                                               chan.unset_funding_info(msg.channel_id);
                                                                return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
                                                        }
                                                },
                                                let context = phase.context_mut();
                                                let logger = WithChannelContext::from(&self.logger, context);
                                                log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
-                                               self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
-                                               finish_shutdown = Some(chan.context_mut().force_shutdown(false));
+                                               finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
                                        },
                                }
                        } else {
                                        msg: update
                                });
                        }
-                       self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
                }
                mem::drop(per_peer_state);
                if let Some(shutdown_result) = shutdown_result {
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                                               failed_channels.push(chan.context.force_shutdown(false));
+                                                                               failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
-                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::DisconnectPeer {
                                                                                        });
                                                                                }
  
-                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
                                                                                log_info!(logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
@@@ -8473,14 -8438,13 +8471,13 @@@ wher
                                                                update_maps_on_chan_removal!(self, &channel.context);
                                                                // It looks like our counterparty went on-chain or funding transaction was
                                                                // reorged out of the main chain. Close the channel.
-                                                               failed_channels.push(channel.context.force_shutdown(true));
+                                                               let reason_message = format!("{}", reason);
+                                                               failed_channels.push(channel.context.force_shutdown(true, reason));
                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
                                                                        });
                                                                }
-                                                               let reason_message = format!("{}", reason);
-                                                               self.issue_channel_close_events(&channel.context, reason);
                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                        node_id: channel.context.get_counterparty_node_id(),
                                                                        action: msgs::ErrorAction::DisconnectPeer {
@@@ -8878,8 -8842,7 +8875,7 @@@ wher
                                        };
                                        // Clean up for removal.
                                        update_maps_on_chan_removal!(self, &context);
-                                       self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
-                                       failed_channels.push(context.force_shutdown(false));
+                                       failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
                                        false
                                });
                                // Note that we don't bother generating any events for pre-accept channels -
                                let pending_msg_events = &mut peer_state.pending_msg_events;
  
                                peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
 -                                      if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
 -                                              // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
 -                                              // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
 -                                              // worry about closing and removing them.
 -                                              debug_assert!(false);
 -                                              None
 -                                      }
 +                                      if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
                                ).for_each(|chan| {
                                        let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
@@@ -10319,7 -10288,7 +10315,7 @@@ wher
                                                log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
-                                       let mut shutdown_result = channel.context.force_shutdown(true);
+                                       let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
                                        if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
                                                return Err(DecodeError::InvalidValue);
                                        }
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
                                // was in-progress, we never broadcasted the funding transaction and can still
                                // safely discard the channel.
-                               let _ = channel.context.force_shutdown(false);
+                               let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
                                channel_closures.push_back((events::Event::ChannelClosed {
                                        channel_id: channel.context.channel_id(),
                                        user_channel_id: channel.context.get_user_id(),