Merge pull request #2731 from shaavan/issue2711
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Thu, 4 Apr 2024 08:12:36 +0000 (08:12 +0000)
committer GitHub <noreply@github.com>
Thu, 4 Apr 2024 08:12:36 +0000 (08:12 +0000)
Delay broadcasting Channel Updates until connected to peers
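
This change adds a `pending_broadcast_messages` buffer to the `ChannelManager` so that gossip broadcasts (e.g. `channel_update`s generated while force-closing a channel or updating its config) are cached rather than pushed into a specific peer's message queue, where they could be lost if no peer was connected. A minimal sketch of the pattern, using simplified placeholder types rather than LDK's actual API:

    use std::sync::Mutex;

    // Placeholder for lightning::events::MessageSendEvent.
    enum MessageSendEvent {
        BroadcastChannelUpdate { msg: Vec<u8> },
    }

    struct Manager {
        // Gossip broadcasts queued until we are connected to at least one peer.
        pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
    }

    impl Manager {
        // Instead of pushing the update to a (possibly absent) peer's message
        // queue, cache it for a later broadcast.
        fn queue_channel_update(&self, update: Vec<u8>) {
            self.pending_broadcast_messages.lock().unwrap()
                .push(MessageSendEvent::BroadcastChannelUpdate { msg: update });
        }

        // Drain the cache only once some peer is connected; otherwise keep the
        // messages queued for a later call.
        fn take_broadcasts_if_connected(&self, any_peer_connected: bool) -> Vec<MessageSendEvent> {
            if !any_peer_connected { return Vec::new(); }
            self.pending_broadcast_messages.lock().unwrap().drain(..).collect()
        }
    }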

lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs

index b3581fd6920ad503ea835e95fb9024ea0a08623d,09a17e42bfa599141254d58bd411ecdbbaad0b15..11d0b299efef3a35d0d8872d5a016ad807498838
@@@ -44,7 -44,6 +44,7 @@@ use crate::events::{Event, EventHandler
  // construct one themselves.
  use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
  use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 +pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
  use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
  #[cfg(any(feature = "_test_utils", test))]
  use crate::ln::features::Bolt11InvoiceFeatures;
@@@ -58,11 -57,10 +58,11 @@@ use crate::ln::msgs::{ChannelMessageHan
  use crate::ln::outbound_payment;
  use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
  use crate::ln::wire::Encode;
 -use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, InvoiceBuilder};
 +use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
  use crate::offers::invoice_error::InvoiceError;
 +use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
  use crate::offers::merkle::SignError;
 -use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 +use crate::offers::offer::{Offer, OfferBuilder};
  use crate::offers::parse::Bolt12SemanticError;
  use crate::offers::refund::{Refund, RefundBuilder};
  use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
@@@ -78,17 -76,11 +78,17 @@@ use crate::util::logger::{Level, Logger
  use crate::util::errors::APIError;
  #[cfg(not(c_bindings))]
  use {
 +      crate::offers::offer::DerivedMetadata,
        crate::routing::router::DefaultRouter,
        crate::routing::gossip::NetworkGraph,
        crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
        crate::sign::KeysManager,
  };
 +#[cfg(c_bindings)]
 +use {
 +      crate::offers::offer::OfferWithDerivedMetadataBuilder,
 +      crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
 +};
  
  use alloc::collections::{btree_map, BTreeMap};
  
@@@ -200,8 -192,6 +200,8 @@@ pub enum PendingHTLCRouting 
                /// For HTLCs received by LDK, these will ultimately bubble back up as
                /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
 +              /// Set if this HTLC is the final hop in a multi-hop blinded path.
 +              requires_blinded_error: bool,
        },
  }
  
@@@ -223,7 -213,6 +223,7 @@@ impl PendingHTLCRouting 
                match self {
                        Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
                        Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
 +                      Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
                        _ => None,
                }
        }
@@@ -903,7 -892,7 +903,7 @@@ pub(super) struct PeerState<SP: Deref> 
        /// The peer is currently connected (i.e. we've seen a
        /// [`ChannelMessageHandler::peer_connected`] and no corresponding
        /// [`ChannelMessageHandler::peer_disconnected`]).
-       is_connected: bool,
+       pub is_connected: bool,
  }
  
  impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                if require_disconnected && self.is_connected {
                        return false
                }
 -              self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
 +              !self.channel_by_id.iter().any(|(_, phase)|
 +                      match phase {
 +                              ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
 +                              ChannelPhase::UnfundedInboundV1(_) => false,
 +                              #[cfg(dual_funding)]
 +                              ChannelPhase::UnfundedOutboundV2(_) => true,
 +                              #[cfg(dual_funding)]
 +                              ChannelPhase::UnfundedInboundV2(_) => false,
 +                      }
 +              )
                        && self.monitor_update_blocked_actions.is_empty()
                        && self.in_flight_monitor_updates.is_empty()
        }
@@@ -995,7 -975,6 +995,7 @@@ pub type SimpleArcChannelManager<M, T, 
        Arc<DefaultRouter<
                Arc<NetworkGraph<Arc<L>>>,
                Arc<L>,
 +              Arc<KeysManager>,
                Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
                ProbabilisticScoringFeeParameters,
                ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
@@@ -1026,7 -1005,6 +1026,7 @@@ pub type SimpleRefChannelManager<'a, 'b
                &'e DefaultRouter<
                        &'f NetworkGraph<&'g L>,
                        &'g L,
 +                      &'c KeysManager,
                        &'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
                        ProbabilisticScoringFeeParameters,
                        ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
@@@ -1181,8 -1159,6 +1181,8 @@@ wher
  //  |   |
  //  |   |__`pending_intercepted_htlcs`
  //  |
 +//  |__`decode_update_add_htlcs`
 +//  |
  //  |__`per_peer_state`
  //      |
  //      |__`pending_inbound_payments`
@@@ -1273,18 -1249,6 +1273,18 @@@ wher
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
  
 +      /// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
 +      ///
 +      /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
 +      /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
 +      /// and via the classic SCID.
 +      ///
 +      /// Note that no consistency guarantees are made about the existence of a channel with the
 +      /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
 +      ///
 +      /// See `ChannelManager` struct-level documentation for lock order requirements.
 +      decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
 +
        /// The sets of payments which are claimable or currently being claimed. See
        /// [`ClaimablePayments`]' individual field docs for more info.
        ///
  
        pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
  
+       /// Tracks the message events that are to be broadcast once we are connected to some peer.
+       pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
        entropy_source: ES,
        node_signer: NS,
        signer_provider: SP,
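
The new `decode_update_add_htlcs` map queues inbound `update_add_htlc`s for batched onion decoding, keyed by the SCID (or SCID alias) they arrived over. A minimal sketch of the queue-then-drain pattern, with simplified placeholder types (the real code drains via `mem::swap` in `process_pending_update_add_htlcs`, shown later in this diff):

    use std::collections::HashMap;
    use std::mem;
    use std::sync::Mutex;

    // Placeholder for msgs::UpdateAddHTLC.
    struct UpdateAddHTLC { htlc_id: u64 }

    struct Manager {
        // SCID (or SCID alias) -> update_add_htlc messages awaiting onion decode.
        decode_update_add_htlcs: Mutex<HashMap<u64, Vec<UpdateAddHTLC>>>,
    }

    impl Manager {
        fn push_decode_update_add_htlc(&self, scid: u64, msg: UpdateAddHTLC) {
            self.decode_update_add_htlcs.lock().unwrap()
                .entry(scid).or_default().push(msg);
        }

        // Take the whole map in one shot so the lock is not held while the
        // (potentially slow) decoding work runs.
        fn process_pending_update_add_htlcs(&self) {
            let mut pending = HashMap::new();
            mem::swap(&mut pending, &mut *self.decode_update_add_htlcs.lock().unwrap());
            for (scid, htlcs) in pending {
                // ... decode each HTLC onion for the channel identified by `scid` ...
                let _ = (scid, htlcs.first().map(|h| h.htlc_id));
            }
        }
    }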
@@@ -1672,6 -1639,9 +1675,6 @@@ pub struct ChannelDetails 
        pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
        /// our counterparty already.
 -      ///
 -      /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to
 -      /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`.
        pub funding_txo: Option<OutPoint>,
        /// The features which this channel operates with. See individual features for more info.
        ///
        ///
        /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
        pub config: Option<ChannelConfig>,
 +      /// Pending inbound HTLCs.
 +      ///
 +      /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
 +      pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
 +      /// Pending outbound HTLCs.
 +      ///
 +      /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
 +      pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
  }
  
  impl ChannelDetails {
                        inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
                        config: Some(context.config()),
                        channel_shutdown_state: Some(context.shutdown_state()),
 +                      pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
 +                      pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
                }
        }
  }
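
For illustration, a hypothetical caller-side sketch of the new `pending_inbound_htlcs`/`pending_outbound_htlcs` fields, assuming an already-configured `channel_manager` (the loop and totals are invented for the example):

    // Total the value of in-flight HTLCs per channel using the new
    // ChannelDetails fields added in this diff.
    for details in channel_manager.list_channels() {
        let inbound_msat: u64 = details.pending_inbound_htlcs.iter().map(|h| h.amount_msat).sum();
        let outbound_msat: u64 = details.pending_outbound_htlcs.iter().map(|h| h.amount_msat).sum();
        println!("channel {}: {} msat inbound, {} msat outbound in flight",
            details.channel_id, inbound_msat, outbound_msat);
    }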
@@@ -2019,7 -1979,7 +2022,7 @@@ macro_rules! handle_error 
                match $internal {
                        Ok(msg) => Ok(msg),
                        Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
-                               let mut msg_events = Vec::with_capacity(2);
+                               let mut msg_event = None;
  
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
                                        let counterparty_node_id = shutdown_res.counterparty_node_id;
  
                                        $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
-                                               msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                               let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
+                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
  
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
-                                       msg_events.push(events::MessageSendEvent::HandleError {
+                                       msg_event = Some(events::MessageSendEvent::HandleError {
                                                node_id: $counterparty_node_id,
                                                action: err.action.clone()
                                        });
                                }
  
-                               if !msg_events.is_empty() {
+                               if let Some(msg_event) = msg_event {
                                        let per_peer_state = $self.per_peer_state.read().unwrap();
                                        if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
                                                let mut peer_state = peer_state_mutex.lock().unwrap();
-                                               peer_state.pending_msg_events.append(&mut msg_events);
+                                               peer_state.pending_msg_events.push(msg_event);
                                        }
                                }
  
@@@ -2123,14 -2084,6 +2127,14 @@@ macro_rules! convert_chan_phase_err 
                        ChannelPhase::UnfundedInboundV1(channel) => {
                                convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
                        },
 +                      #[cfg(dual_funding)]
 +                      ChannelPhase::UnfundedOutboundV2(channel) => {
 +                              convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
 +                      },
 +                      #[cfg(dual_funding)]
 +                      ChannelPhase::UnfundedInboundV2(channel) => {
 +                              convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
 +                      },
                }
        };
  }
@@@ -2206,7 -2159,6 +2210,7 @@@ macro_rules! emit_channel_pending_even
                                counterparty_node_id: $channel.context.get_counterparty_node_id(),
                                user_channel_id: $channel.context.get_user_id(),
                                funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
 +                              channel_type: Some($channel.context.get_channel_type().clone()),
                        }, None));
                        $channel.context.set_channel_pending_event_emitted();
                }
@@@ -2233,7 -2185,7 +2237,7 @@@ macro_rules! handle_monitor_update_comp
                let logger = WithChannelContext::from(&$self.logger, &$chan.context);
                let mut updates = $chan.monitor_updating_restored(&&logger,
                        &$self.node_signer, $self.chain_hash, &$self.default_configuration,
 -                      $self.best_block.read().unwrap().height());
 +                      $self.best_block.read().unwrap().height);
                let counterparty_node_id = $chan.context.get_counterparty_node_id();
                let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
                        // We only send a channel_update in the case where we are just now sending a
                let update_actions = $peer_state.monitor_update_blocked_actions
                        .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
  
 -              let htlc_forwards = $self.handle_channel_resumption(
 +              let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
                        &mut $peer_state.pending_msg_events, $chan, updates.raa,
 -                      updates.commitment_update, updates.order, updates.accepted_htlcs,
 +                      updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
                        updates.funding_broadcastable, updates.channel_ready,
                        updates.announcement_sigs);
                if let Some(upd) = channel_update {
                if let Some(forwards) = htlc_forwards {
                        $self.forward_htlcs(&mut [forwards][..]);
                }
 +              if let Some(decode) = decode_update_add_htlcs {
 +                      $self.push_decode_update_add_htlcs(decode);
 +              }
                $self.finalize_claims(updates.finalized_claimed_htlcs);
                for failure in updates.failed_htlcs.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
@@@ -2351,7 -2300,7 +2355,7 @@@ macro_rules! handle_new_monitor_update 
                handle_new_monitor_update!($self, $update_res, $chan, _internal,
                        handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
        };
 -      ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 +      ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
                        .or_insert_with(Vec::new);
                // During startup, we push monitor updates as background events through to here in
@@@ -2490,15 -2439,14 +2494,15 @@@ wher
  
                        best_block: RwLock::new(params.best_block),
  
 -                      outbound_scid_aliases: Mutex::new(HashSet::new()),
 -                      pending_inbound_payments: Mutex::new(HashMap::new()),
 +                      outbound_scid_aliases: Mutex::new(new_hash_set()),
 +                      pending_inbound_payments: Mutex::new(new_hash_map()),
                        pending_outbound_payments: OutboundPayments::new(),
 -                      forward_htlcs: Mutex::new(HashMap::new()),
 -                      claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
 -                      pending_intercepted_htlcs: Mutex::new(HashMap::new()),
 -                      outpoint_to_peer: Mutex::new(HashMap::new()),
 -                      short_to_chan_info: FairRwLock::new(HashMap::new()),
 +                      forward_htlcs: Mutex::new(new_hash_map()),
 +                      decode_update_add_htlcs: Mutex::new(new_hash_map()),
 +                      claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
 +                      pending_intercepted_htlcs: Mutex::new(new_hash_map()),
 +                      outpoint_to_peer: Mutex::new(new_hash_map()),
 +                      short_to_chan_info: FairRwLock::new(new_hash_map()),
  
                        our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
                        secp_ctx,
  
                        highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
  
 -                      per_peer_state: FairRwLock::new(HashMap::new()),
 +                      per_peer_state: FairRwLock::new(new_hash_map()),
  
                        pending_events: Mutex::new(VecDeque::new()),
                        pending_events_processor: AtomicBool::new(false),
                        funding_batch_states: Mutex::new(BTreeMap::new()),
  
                        pending_offers_messages: Mutex::new(Vec::new()),
+                       pending_broadcast_messages: Mutex::new(Vec::new()),
  
                        entropy_source,
                        node_signer,
        }
  
        fn create_and_insert_outbound_scid_alias(&self) -> u64 {
 -              let height = self.best_block.read().unwrap().height();
 +              let height = self.best_block.read().unwrap().height;
                let mut outbound_scid_alias = 0;
                let mut i = 0;
                loop {
                        let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
                        match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
                                their_features, channel_value_satoshis, push_msat, user_channel_id, config,
 -                              self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id)
 +                              self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id)
                        {
                                Ok(res) => res,
                                Err(e) => {
                // the same channel.
                let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
                {
 -                      let best_block_height = self.best_block.read().unwrap().height();
 +                      let best_block_height = self.best_block.read().unwrap().height;
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                // the same channel.
                let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
                {
 -                      let best_block_height = self.best_block.read().unwrap().height();
 +                      let best_block_height = self.best_block.read().unwrap().height;
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
  
        /// Gets the list of channels we have with a given counterparty, in random order.
        pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let per_peer_state = self.per_peer_state.read().unwrap();
  
                if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
  
                                                // Update the monitor with the shutdown script if necessary.
                                                if let Some(monitor_update) = monitor_update_opt.take() {
 -                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update,
 +                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                }
                                        } else {
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
 +                                      // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
 +                                      #[cfg(dual_funding)]
 +                                      ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
 +                                              self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
 +                                              // Unfunded channel has no update
 +                                              (None, chan_phase.context().get_counterparty_node_id())
 +                                      },
                                }
                        } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
                                log_error!(logger, "Force-closing channel {}", &channel_id);
                        }
                };
                if let Some(update) = update_opt {
-                       // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
-                       // not try to broadcast it via whatever peer we have.
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       let a_peer_state_opt = per_peer_state.get(peer_node_id)
-                               .ok_or(per_peer_state.values().next());
-                       if let Ok(a_peer_state_mutex) = a_peer_state_opt {
-                               let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
-                               a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                       msg: update
-                               });
-                       }
+                       // Rather than broadcasting immediately (we may not be connected to any
+                       // peer), cache the `ChannelUpdate` and broadcast it later.
+                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                               msg: update
+                       });
                }
  
                Ok(counterparty_node_id)
        /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
        /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
        ///
 -      /// You can always get the latest local transaction(s) to broadcast from
 -      /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
 +      /// You can always broadcast the latest local transaction(s) via
 +      /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
        pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, false)
                }
        }
  
 +      fn can_forward_htlc_to_outgoing_channel(
 +              &self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
 +      ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
 +              if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
 +                      // Note that the behavior here should be identical to the above block - we
 +                      // should NOT reveal the existence or non-existence of a private channel if
 +                      // we don't allow forwards outbound over them.
 +                      return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
 +              }
 +              if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
 +                      // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
 +                      // "refuse to forward unless the SCID alias was used", so we pretend
 +                      // we don't have the channel here.
 +                      return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
 +              }
 +
 +              // Note that we could technically not return an error yet here and just hope
 +              // that the connection is reestablished or monitor updated by the time we get
 +              // around to doing the actual forward, but better to fail early if we can and
 +              // hopefully an attacker trying to path-trace payments cannot make this occur
 +              // on a small/per-node/per-channel scale.
 +              if !chan.context.is_live() { // channel_disabled
 +                      // If the channel_update we're going to return is disabled (i.e. the
 +                      // peer has been disabled for some time), return `channel_disabled`,
 +                      // otherwise return `temporary_channel_failure`.
 +                      let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
 +                      if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
 +                              return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
 +                      } else {
 +                              return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
 +                      }
 +              }
 +              if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
 +                      let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
 +                      return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
 +              }
 +              if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
 +                      let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
 +                      return Err((err, code, chan_update_opt));
 +              }
 +
 +              Ok(())
 +      }
 +
 +      /// Executes a callback `C` that returns some value `X` on the channel found with the given
 +      /// `scid`. `None` is returned when the channel is not found.
 +      fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
 +              &self, scid: u64, callback: C,
 +      ) -> Option<X> {
 +              let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
 +                      None => return None,
 +                      Some((cp_id, id)) => (cp_id, id),
 +              };
 +              let per_peer_state = self.per_peer_state.read().unwrap();
 +              let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
 +              if peer_state_mutex_opt.is_none() {
 +                      return None;
 +              }
 +              let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 +              let peer_state = &mut *peer_state_lock;
 +              match peer_state.channel_by_id.get_mut(&channel_id).and_then(
 +                      |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
 +              ) {
 +                      None => None,
 +                      Some(chan) => Some(callback(chan)),
 +              }
 +      }
 +
 +      fn can_forward_htlc(
 +              &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
 +      ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
 +              match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
 +                      self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
 +              }) {
 +                      Some(Ok(())) => {},
 +                      Some(Err(e)) => return Err(e),
 +                      None => {
 +                              // If we couldn't find the channel info for the scid, it may be a phantom or
 +                              // intercept forward.
 +                              if (self.default_configuration.accept_intercept_htlcs &&
 +                                      fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
 +                                      fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
 +                              {} else {
 +                                      return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 +                              }
 +                      }
 +              }
 +
 +              let cur_height = self.best_block.read().unwrap().height + 1;
 +              if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
 +                      cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
 +              ) {
 +                      let chan_update_opt = self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
 +                              self.get_channel_update_for_onion(next_packet_details.outgoing_scid, chan).ok()
 +                      }).flatten();
 +                      return Err((err_msg, err_code, chan_update_opt));
 +              }
 +
 +              Ok(())
 +      }
 +
 +      fn htlc_failure_from_update_add_err(
 +              &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
 +              mut err_code: u16, chan_update: Option<msgs::ChannelUpdate>, is_intro_node_blinded_forward: bool,
 +              shared_secret: &[u8; 32]
 +      ) -> HTLCFailureMsg {
 +              let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
 +              if chan_update.is_some() && err_code & 0x1000 == 0x1000 {
 +                      let chan_update = chan_update.unwrap();
 +                      if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
 +                              msg.amount_msat.write(&mut res).expect("Writes cannot fail");
 +                      }
 +                      else if err_code == 0x1000 | 13 {
 +                              msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
 +                      }
 +                      else if err_code == 0x1000 | 20 {
 +                              // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
 +                              0u16.write(&mut res).expect("Writes cannot fail");
 +                      }
 +                      (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
 +                      msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
 +                      chan_update.write(&mut res).expect("Writes cannot fail");
 +              } else if err_code & 0x1000 == 0x1000 {
 +                      // If we're trying to return an error that requires a `channel_update` but
 +                      // we're forwarding to a phantom or intercept "channel" (i.e. cannot
 +                      // generate an update), just use the generic "temporary_node_failure"
 +                      // instead.
 +                      err_code = 0x2000 | 2;
 +              }
 +
 +              log_info!(
 +                      WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
 +                      "Failed to accept/forward incoming HTLC: {}", err_msg
 +              );
 +              // If `msg.blinding_point` is set, we must always fail with malformed.
 +              if msg.blinding_point.is_some() {
 +                      return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 +                              channel_id: msg.channel_id,
 +                              htlc_id: msg.htlc_id,
 +                              sha256_of_onion: [0; 32],
 +                              failure_code: INVALID_ONION_BLINDING,
 +                      });
 +              }
 +
 +              let (err_code, err_data) = if is_intro_node_blinded_forward {
 +                      (INVALID_ONION_BLINDING, &[0; 32][..])
 +              } else {
 +                      (err_code, &res.0[..])
 +              };
 +              HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 +                      channel_id: msg.channel_id,
 +                      htlc_id: msg.htlc_id,
 +                      reason: HTLCFailReason::reason(err_code, err_data.to_vec())
 +                              .get_encrypted_failure_packet(shared_secret, &None),
 +              })
 +      }
 +
        fn decode_update_add_htlc_onion(
                &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
        ) -> Result<
                        msg, &self.node_signer, &self.logger, &self.secp_ctx
                )?;
  
 -              let is_intro_node_forward = match next_hop {
 -                      onion_utils::Hop::Forward {
 -                              next_hop_data: msgs::InboundOnionPayload::BlindedForward {
 -                                      intro_node_blinding_point: Some(_), ..
 -                              }, ..
 -                      } => true,
 -                      _ => false,
 -              };
 -
 -              macro_rules! return_err {
 -                      ($msg: expr, $err_code: expr, $data: expr) => {
 -                              {
 -                                      log_info!(
 -                                              WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
 -                                              "Failed to accept/forward incoming HTLC: {}", $msg
 -                                      );
 -                                      // If `msg.blinding_point` is set, we must always fail with malformed.
 -                                      if msg.blinding_point.is_some() {
 -                                              return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 -                                                      channel_id: msg.channel_id,
 -                                                      htlc_id: msg.htlc_id,
 -                                                      sha256_of_onion: [0; 32],
 -                                                      failure_code: INVALID_ONION_BLINDING,
 -                                              }));
 -                                      }
 -
 -                                      let (err_code, err_data) = if is_intro_node_forward {
 -                                              (INVALID_ONION_BLINDING, &[0; 32][..])
 -                                      } else { ($err_code, $data) };
 -                                      return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 -                                              channel_id: msg.channel_id,
 -                                              htlc_id: msg.htlc_id,
 -                                              reason: HTLCFailReason::reason(err_code, err_data.to_vec())
 -                                                      .get_encrypted_failure_packet(&shared_secret, &None),
 -                                      }));
 -                              }
 -                      }
 -              }
 -
 -              let NextPacketDetails {
 -                      next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value
 -              } = match next_packet_details_opt {
 +              let next_packet_details = match next_packet_details_opt {
                        Some(next_packet_details) => next_packet_details,
                        // it is a receive, so no need for outbound checks
                        None => return Ok((next_hop, shared_secret, None)),
  
                // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
                // can't hold the outbound peer state lock at the same time as the inbound peer state lock.
 -              if let Some((err, mut code, chan_update)) = loop {
 -                      let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
 -                      let forwarding_chan_info_opt = match id_option {
 -                              None => { // unknown_next_peer
 -                                      // Note that this is likely a timing oracle for detecting whether an scid is a
 -                                      // phantom or an intercept.
 -                                      if (self.default_configuration.accept_intercept_htlcs &&
 -                                              fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
 -                                              fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
 -                                      {
 -                                              None
 -                                      } else {
 -                                              break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 -                                      }
 -                              },
 -                              Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
 -                      };
 -                      let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
 -                              let per_peer_state = self.per_peer_state.read().unwrap();
 -                              let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
 -                              if peer_state_mutex_opt.is_none() {
 -                                      break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 -                              }
 -                              let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 -                              let peer_state = &mut *peer_state_lock;
 -                              let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
 -                                      |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
 -                              ).flatten() {
 -                                      None => {
 -                                              // Channel was removed. The short_to_chan_info and channel_by_id maps
 -                                              // have no consistency guarantees.
 -                                              break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 -                                      },
 -                                      Some(chan) => chan
 -                              };
 -                              if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
 -                                      // Note that the behavior here should be identical to the above block - we
 -                                      // should NOT reveal the existence or non-existence of a private channel if
 -                                      // we don't allow forwards outbound over them.
 -                                      break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
 -                              }
 -                              if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
 -                                      // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
 -                                      // "refuse to forward unless the SCID alias was used", so we pretend
 -                                      // we don't have the channel here.
 -                                      break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
 -                              }
 -                              let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
 -
 -                              // Note that we could technically not return an error yet here and just hope
 -                              // that the connection is reestablished or monitor updated by the time we get
 -                              // around to doing the actual forward, but better to fail early if we can and
 -                              // hopefully an attacker trying to path-trace payments cannot make this occur
 -                              // on a small/per-node/per-channel scale.
 -                              if !chan.context.is_live() { // channel_disabled
 -                                      // If the channel_update we're going to return is disabled (i.e. the
 -                                      // peer has been disabled for some time), return `channel_disabled`,
 -                                      // otherwise return `temporary_channel_failure`.
 -                                      if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
 -                                              break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
 -                                      } else {
 -                                              break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
 -                                      }
 -                              }
 -                              if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
 -                                      break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
 -                              }
 -                              if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
 -                                      break Some((err, code, chan_update_opt));
 -                              }
 -                              chan_update_opt
 -                      } else {
 -                              None
 -                      };
 -
 -                      let cur_height = self.best_block.read().unwrap().height() + 1;
 -
 -                      if let Err((err_msg, code)) = check_incoming_htlc_cltv(
 -                              cur_height, outgoing_cltv_value, msg.cltv_expiry
 -                      ) {
 -                              if code & 0x1000 != 0 && chan_update_opt.is_none() {
 -                                      // We really should set `incorrect_cltv_expiry` here but as we're not
 -                                      // forwarding over a real channel we can't generate a channel_update
 -                                      // for it. Instead we just return a generic temporary_node_failure.
 -                                      break Some((err_msg, 0x2000 | 2, None))
 -                              }
 -                              let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None };
 -                              break Some((err_msg, code, chan_update_opt));
 -                      }
 +              self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
 +                      let (err_msg, err_code, chan_update_opt) = e;
 +                      self.htlc_failure_from_update_add_err(
 +                              msg, counterparty_node_id, err_msg, err_code, chan_update_opt,
 +                              next_hop.is_intro_node_blinded_forward(), &shared_secret
 +                      )
 +              })?;
  
 -                      break None;
 -              }
 -              {
 -                      let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
 -                      if let Some(chan_update) = chan_update {
 -                              if code == 0x1000 | 11 || code == 0x1000 | 12 {
 -                                      msg.amount_msat.write(&mut res).expect("Writes cannot fail");
 -                              }
 -                              else if code == 0x1000 | 13 {
 -                                      msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
 -                              }
 -                              else if code == 0x1000 | 20 {
 -                                      // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
 -                                      0u16.write(&mut res).expect("Writes cannot fail");
 -                              }
 -                              (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
 -                              msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
 -                              chan_update.write(&mut res).expect("Writes cannot fail");
 -                      } else if code & 0x1000 == 0x1000 {
 -                              // If we're trying to return an error that requires a `channel_update` but
 -                              // we're forwarding to a phantom or intercept "channel" (i.e. cannot
 -                              // generate an update), just use the generic "temporary_node_failure"
 -                              // instead.
 -                              code = 0x2000 | 2;
 -                      }
 -                      return_err!(err, code, &res.0[..]);
 -              }
 -              Ok((next_hop, shared_secret, Some(next_packet_pubkey)))
 +              Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
        }
  
        fn construct_pending_htlc_status<'a>(
                match decoded_hop {
                        onion_utils::Hop::Receive(next_hop_data) => {
                                // OUR PAYMENT!
 -                              let current_height: u32 = self.best_block.read().unwrap().height();
 +                              let current_height: u32 = self.best_block.read().unwrap().height;
                                match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
                                        msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
                                        current_height, self.default_configuration.accept_mpp_keysend)
                                                        }, onion_packet, None, &self.fee_estimator, &&logger);
                                                match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
                                                        Some(monitor_update) => {
 -                                                              match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
 +                                                              match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
                                                                        false => {
                                                                                // Note that MonitorUpdateInProgress here indicates (per function
                                                                                // docs) that we will resend the commitment update once monitor
        /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
        /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
        pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment_with_route(route, payment_hash, recipient_onion, payment_id,
        /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
        /// `route_params` and retry failed payment paths based on `retry_strategy`.
        pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
  
        #[cfg(test)]
        pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
                        keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
  
        #[cfg(test)]
        pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
        }
  
        }
  
        pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment_for_bolt12_invoice(
        ///
        /// [`send_payment`]: Self::send_payment
        pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_spontaneous_payment_with_route(
                        route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
        ///
        /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
        pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
                        payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
        /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
        /// us to easily discern them from real payments.
        pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
                        &self.entropy_source, &self.node_signer, best_block_height,
                                ProbeSendFailure::RouteNotFound
                        })?;
  
 -              let mut used_liquidity_map = HashMap::with_capacity(first_hops.len());
 +              let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
  
                let mut res = Vec::new();
  
                        }));
                }
                {
 -                      let height = self.best_block.read().unwrap().height();
 +                      let height = self.best_block.read().unwrap().height;
                        // Transactions are evaluated as final by network mempools if their locktime is strictly
                        // lower than the next block height. However, the modules constituting our Lightning
                        // node might not have perfect sync about their blockchain views. Thus, if the wallet
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                for channel_id in channel_ids {
                        if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
                                }
                                if let ChannelPhase::Funded(channel) = channel_phase {
                                        if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
                                        } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                        node_id: channel.context.get_counterparty_node_id(),
                Ok(())
        }
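
The hunk above carries the PR's headline change: a `BroadcastChannelUpdate` no longer rides one peer's `pending_msg_events` but a `ChannelManager`-wide `pending_broadcast_messages` queue, so the update can wait until there is at least one connected peer to gossip to. A minimal sketch of that queue, with illustrative stand-ins for the real event and message types:

    use std::sync::Mutex;

    pub struct ChannelUpdate; // stand-in for lightning::ln::msgs::ChannelUpdate

    pub enum MessageSendEvent {
        BroadcastChannelUpdate { msg: ChannelUpdate },
    }

    pub struct Manager {
        pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
    }

    impl Manager {
        pub fn queue_broadcast(&self, msg: ChannelUpdate) {
            // Node-wide queue: the update is neither dropped nor sent into the
            // void while we happen to have zero connected peers.
            self.pending_broadcast_messages.lock().unwrap()
                .push(MessageSendEvent::BroadcastChannelUpdate { msg });
        }

        pub fn drain_broadcasts(&self) -> Vec<MessageSendEvent> {
            // Presumably drained when message events are collected and a peer
            // is available to receive gossip.
            self.pending_broadcast_messages.lock().unwrap().drain(..).collect()
        }
    }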
  
 +      fn process_pending_update_add_htlcs(&self) {
 +              let mut decode_update_add_htlcs = new_hash_map();
 +              mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
 +
 +              let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
 +                      if let Some(outgoing_scid) = outgoing_scid_opt {
 +                              match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
 +                                      Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
 +                                              HTLCDestination::NextHopChannel {
 +                                                      node_id: Some(*outgoing_counterparty_node_id),
 +                                                      channel_id: *outgoing_channel_id,
 +                                              },
 +                                      None => HTLCDestination::UnknownNextHop {
 +                                              requested_forward_scid: outgoing_scid,
 +                                      },
 +                              }
 +                      } else {
 +                              HTLCDestination::FailedPayment { payment_hash }
 +                      }
 +              };
 +
 +              'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
 +                      let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
 +                              let counterparty_node_id = chan.context.get_counterparty_node_id();
 +                              let channel_id = chan.context.channel_id();
 +                              let funding_txo = chan.context.get_funding_txo().unwrap();
 +                              let user_channel_id = chan.context.get_user_id();
 +                              let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
 +                              (counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
 +                      });
 +                      let (
 +                              incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
 +                              incoming_user_channel_id, incoming_accept_underpaying_htlcs
 +                       ) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
 +                              incoming_channel_details
 +                      } else {
 +                              // The incoming channel no longer exists; HTLCs should be resolved onchain instead.
 +                              continue;
 +                      };
 +
 +                      let mut htlc_forwards = Vec::new();
 +                      let mut htlc_fails = Vec::new();
 +                      for update_add_htlc in &update_add_htlcs {
 +                              let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
 +                                      &update_add_htlc, &self.node_signer, &self.logger, &self.secp_ctx
 +                              ) {
 +                                      Ok(decoded_onion) => decoded_onion,
 +                                      Err(htlc_fail) => {
 +                                              htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
 +                                              continue;
 +                                      },
 +                              };
 +
 +                              let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
 +                              let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
 +
 +                              // Process the HTLC on the incoming channel.
 +                              match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
 +                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 +                                      chan.can_accept_incoming_htlc(
 +                                              update_add_htlc, &self.fee_estimator, &logger,
 +                                      )
 +                              }) {
 +                                      Some(Ok(_)) => {},
 +                                      Some(Err((err, code))) => {
 +                                              let outgoing_chan_update_opt = if let Some(outgoing_scid) = outgoing_scid_opt.as_ref() {
 +                                                      self.do_funded_channel_callback(*outgoing_scid, |chan: &mut Channel<SP>| {
 +                                                              self.get_channel_update_for_onion(*outgoing_scid, chan).ok()
 +                                                      }).flatten()
 +                                              } else {
 +                                                      None
 +                                              };
 +                                              let htlc_fail = self.htlc_failure_from_update_add_err(
 +                                                      &update_add_htlc, &incoming_counterparty_node_id, err, code,
 +                                                      outgoing_chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
 +                                              );
 +                                              let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
 +                                              htlc_fails.push((htlc_fail, htlc_destination));
 +                                              continue;
 +                                      },
 +                                      // The incoming channel no longer exists; HTLCs should be resolved onchain instead.
 +                                      None => continue 'outer_loop,
 +                              }
 +
 +                              // Now process the HTLC on the outgoing channel if it's a forward.
 +                              if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
 +                                      if let Err((err, code, chan_update_opt)) = self.can_forward_htlc(
 +                                              &update_add_htlc, next_packet_details
 +                                      ) {
 +                                              let htlc_fail = self.htlc_failure_from_update_add_err(
 +                                                      &update_add_htlc, &incoming_counterparty_node_id, err, code,
 +                                                      chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
 +                                              );
 +                                              let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
 +                                              htlc_fails.push((htlc_fail, htlc_destination));
 +                                              continue;
 +                                      }
 +                              }
 +
 +                              match self.construct_pending_htlc_status(
 +                                      &update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
 +                                      incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
 +                              ) {
 +                                      PendingHTLCStatus::Forward(htlc_forward) => {
 +                                              htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
 +                                      },
 +                                      PendingHTLCStatus::Fail(htlc_fail) => {
 +                                              let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
 +                                              htlc_fails.push((htlc_fail, htlc_destination));
 +                                      },
 +                              }
 +                      }
 +
 +                      // Process, as a single batch, all of the forwards and failures for the
 +                      // channel in which the HTLCs were proposed.
 +                      let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id,
 +                              incoming_user_channel_id, htlc_forwards.drain(..).collect());
 +                      self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
 +                      for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
 +                              let failure = match htlc_fail {
 +                                      HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
 +                                              htlc_id: fail_htlc.htlc_id,
 +                                              err_packet: fail_htlc.reason,
 +                                      },
 +                                      HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
 +                                              htlc_id: fail_malformed_htlc.htlc_id,
 +                                              sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
 +                                              failure_code: fail_malformed_htlc.failure_code,
 +                                      },
 +                              };
 +                              self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_insert(vec![]).push(failure);
 +                              self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
 +                                      prev_channel_id: incoming_channel_id,
 +                                      failed_next_destination: htlc_destination,
 +                              }, None));
 +                      }
 +              }
 +      }
 +
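
`process_pending_update_add_htlcs` takes the entire `decode_update_add_htlcs` map in a single `mem::swap`, keeping the critical section to one pointer exchange and doing all onion decoding outside the lock, batched per incoming channel. The skeleton of that pattern, with an illustrative payload type:

    use std::collections::HashMap;
    use std::mem;
    use std::sync::Mutex;

    pub struct UpdateAddHtlc { pub htlc_id: u64 } // illustrative stand-in

    pub struct Manager {
        // Keyed by the incoming channel's short channel id.
        decode_update_add_htlcs: Mutex<HashMap<u64, Vec<UpdateAddHtlc>>>,
    }

    impl Manager {
        pub fn process_pending(&self) {
            // One short critical section to take everything...
            let mut pending = HashMap::new();
            mem::swap(&mut pending, &mut self.decode_update_add_htlcs.lock().unwrap());
            // ...then decode and validate outside the lock, channel by channel.
            for (incoming_scid, htlcs) in pending {
                for htlc in htlcs {
                    let _ = (incoming_scid, htlc.htlc_id); // the real code decodes the onion here
                }
            }
        }
    }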
        /// Processes HTLCs which are pending, waiting on a random forward delay.
        ///
        /// Should only really ever be called in response to a PendingHTLCsForwardable event.
        pub fn process_pending_htlc_forwards(&self) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
 +              self.process_pending_update_add_htlcs();
 +
                let mut new_events = VecDeque::new();
                let mut failed_forwards = Vec::new();
                let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
                {
 -                      let mut forward_htlcs = HashMap::new();
 +                      let mut forward_htlcs = new_hash_map();
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
  
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
                                                                                                };
                                                                                                match next_hop {
                                                                                                        onion_utils::Hop::Receive(hop_data) => {
 -                                                                                                              let current_height: u32 = self.best_block.read().unwrap().height();
 +                                                                                                              let current_height: u32 = self.best_block.read().unwrap().height;
                                                                                                                match create_recv_pending_htlc_info(hop_data,
                                                                                                                        incoming_shared_secret, payment_hash, outgoing_amt_msat,
                                                                                                                        outgoing_cltv_value, Some(phantom_shared_secret), false, None,
                                                                                (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
                                                                                        Some(payment_data), phantom_shared_secret, onion_fields)
                                                                        },
 -                                                                      PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
 +                                                                      PendingHTLCRouting::ReceiveKeysend {
 +                                                                              payment_data, payment_preimage, payment_metadata,
 +                                                                              incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _
 +                                                                      } => {
                                                                                let onion_fields = RecipientOnionFields {
                                                                                        payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
                                                                                        payment_metadata,
                                                                                debug_assert!(!committed_to_claimable);
                                                                                let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
                                                                                htlc_msat_height_data.extend_from_slice(
 -                                                                                      &self.best_block.read().unwrap().height().to_be_bytes(),
 +                                                                                      &self.best_block.read().unwrap().height.to_be_bytes(),
                                                                                );
                                                                                failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                short_channel_id: $htlc.prev_hop.short_channel_id,
                                                                                                        }
                                                                                                };
                                                                                                if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
 -                                                                                                      let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64;
 +                                                                                                      let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
                                                                                                        if (cltv_expiry as u64) < expected_min_expiry_height {
                                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
                                                                                                                        &payment_hash, cltv_expiry, expected_min_expiry_height);
                        }
                }
  
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
                        || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
                        &self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
                                                                hash_map::Entry::Occupied(mut chan_phase) => {
                                                                        if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
                                                                                updated_chan = true;
 -                                                                              handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(),
 +                                                                              handle_new_monitor_update!(self, funding_txo, update.clone(),
                                                                                        peer_state_lock, peer_state, per_peer_state, chan);
                                                                        } else {
                                                                                debug_assert!(false, "We shouldn't have an update for a non-funded channel");
  
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
 -                      if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
 -                              log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
 -                              chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 -                      }
                        return NotifyOption::SkipPersistNoEvents;
                }
                if !chan.context.is_live() {
                                                                                if n >= DISABLE_GOSSIP_TICKS {
                                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
                                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                        msg: update
                                                                                                });
                                                                                        }
                                                                                if n >= ENABLE_GOSSIP_TICKS {
                                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
                                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                        msg: update
                                                                                                });
                                                                                        }
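
Both paths above are gated on tick counters so that a flapping connection does not spray contradictory `channel_update`s: a disabling update is queued only after `DISABLE_GOSSIP_TICKS` consecutive timer ticks offline, an enabling one only after `ENABLE_GOSSIP_TICKS` ticks back online, and with this PR both land in the shared broadcast queue. The gating, sketched with illustrative constant values:

    // Values are illustrative; the real constants are defined in
    // channelmanager.rs alongside timer_tick_occurred.
    const DISABLE_GOSSIP_TICKS: u8 = 10;
    const ENABLE_GOSSIP_TICKS: u8 = 5;

    fn should_gossip_disabled(ticks_since_disconnect: u8) -> bool {
        ticks_since_disconnect >= DISABLE_GOSSIP_TICKS
    }

    fn should_gossip_enabled(ticks_since_reconnect: u8) -> bool {
        ticks_since_reconnect >= ENABLE_GOSSIP_TICKS
    }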
                                                                process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
                                                                        pending_msg_events, counterparty_node_id)
                                                        },
 +                                                      #[cfg(dual_funding)]
 +                                                      ChannelPhase::UnfundedInboundV2(chan) => {
 +                                                              process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
 +                                                                      pending_msg_events, counterparty_node_id)
 +                                                      },
 +                                                      #[cfg(dual_funding)]
 +                                                      ChannelPhase::UnfundedOutboundV2(chan) => {
 +                                                              process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
 +                                                                      pending_msg_events, counterparty_node_id)
 +                                                      },
                                                }
                                        });
  
                        FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
                        FailureCode::IncorrectOrUnknownPaymentDetails => {
                                let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
 -                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
 +                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
                                HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
                        },
                        FailureCode::InvalidOnionPayload(data) => {
                }
        }
  
 +      fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
 +              let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
 +              if push_forward_event { self.push_pending_forwards_ev(); }
 +      }
 +
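
The split above leaves `fail_htlc_backwards_internal` as a thin wrapper: the `_without_forward_event` worker reports whether a `PendingHTLCsForwardable` event is owed, so a single-failure caller emits it immediately while a batch caller can coalesce many failures into one event. The shape of that split, sketched with placeholder bodies:

    struct Mgr;

    impl Mgr {
        fn fail_htlc(&self) {
            // Single-failure path: emit the event right away if needed.
            if self.fail_htlc_without_event() {
                self.push_pending_forwards_event();
            }
        }

        fn fail_htlcs(&self, count: usize) {
            // Batch path: at most one event for the whole batch.
            let mut push_event = false;
            for _ in 0..count {
                push_event |= self.fail_htlc_without_event();
            }
            if push_event {
                self.push_pending_forwards_event();
            }
        }

        fn fail_htlc_without_event(&self) -> bool { true } // placeholder
        fn push_pending_forwards_event(&self) {} // queues Event::PendingHTLCsForwardable
    }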
        /// Fails an HTLC backwards to the node that sent it to us.
        /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 -      fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
 +      fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
                // Ensure that no peer state channel storage lock is held when calling this function.
                // This ensures that future code doesn't introduce a lock-order requirement for
                // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
 +              let mut push_forward_event;
                match source {
                        HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
 -                              if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
 +                              push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
                                        session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
 -                                      &self.pending_events, &self.logger)
 -                              { self.push_pending_forwards_ev(); }
 +                                      &self.pending_events, &self.logger);
                        },
                        HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
                                        }
                                };
  
 -                              let mut push_forward_ev = false;
 +                              push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
                                let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
 -                              if forward_htlcs.is_empty() {
 -                                      push_forward_ev = true;
 -                              }
 +                              push_forward_event &= forward_htlcs.is_empty();
                                match forward_htlcs.entry(*short_channel_id) {
                                        hash_map::Entry::Occupied(mut entry) => {
                                                entry.get_mut().push(failure);
                                        }
                                }
                                mem::drop(forward_htlcs);
 -                              if push_forward_ev { self.push_pending_forwards_ev(); }
                                let mut pending_events = self.pending_events.lock().unwrap();
                                pending_events.push_back((events::Event::HTLCHandlingFailed {
                                        prev_channel_id: *channel_id,
                                }, None));
                        },
                }
 +              push_forward_event
        }
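
The boolean handling above encodes an invariant worth spelling out: a `PendingHTLCsForwardable` event is owed only when both the `decode_update_add_htlcs` and `forward_htlcs` queues were empty before this failure was enqueued; otherwise an event was already generated for the existing work and pushing another would duplicate it. Distilled, with illustrative payload types:

    use std::collections::HashMap;
    use std::sync::Mutex;

    pub struct Queues {
        decode_update_add_htlcs: Mutex<HashMap<u64, Vec<u64>>>,
        forward_htlcs: Mutex<HashMap<u64, Vec<u64>>>,
    }

    impl Queues {
        /// Returns true iff the caller should push a forwardable event,
        /// i.e. this enqueue was an empty-to-non-empty transition.
        pub fn enqueue_failure(&self, scid: u64, htlc_id: u64) -> bool {
            let mut push_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
            let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
            push_event &= forward_htlcs.is_empty();
            forward_htlcs.entry(scid).or_insert_with(Vec::new).push(htlc_id);
            push_event
        }
    }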
  
        /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
                if !valid_mpp {
                        for htlc in sources.drain(..) {
                                let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
 -                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
 +                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
                                let source = HTLCSource::PreviousHopData(htlc.prev_hop);
                                let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
                                let receiver = HTLCDestination::FailedPayment { payment_hash };
                                                                        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                                }
                                                                if !during_init {
 -                                                                      handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock,
 +                                                                      handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
                                                                                peer_state, per_peer_state, chan);
                                                                } else {
                                                                        // If we're running during init we cannot update a monitor directly -
        }
  
        fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
 -              forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
 -              next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint,
 -              next_channel_id: ChannelId,
 +              forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
 +              startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
 +              next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
        ) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_channel_id = hop_data.channel_id;
 +                              let prev_user_channel_id = hop_data.user_channel_id;
                                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
                                #[cfg(debug_assertions)]
                                let claiming_chan_funding_outpoint = hop_data.outpoint;
 -                              #[cfg(debug_assertions)]
 -                              let claiming_channel_id = hop_data.channel_id;
                                let res = self.claim_funds_from_hop(hop_data, payment_preimage,
                                        |htlc_claim_value_msat, definitely_duplicate| {
                                                let chan_to_release =
                                                                                BackgroundEvent::MonitorUpdatesComplete {
                                                                                        channel_id, ..
                                                                                } =>
 -                                                                                      *channel_id == claiming_channel_id,
 +                                                                                      *channel_id == prev_channel_id,
                                                                        }
                                                                }), "{:?}", *background_events);
                                                        }
                                                                })
                                                        } else { None }
                                                } else {
 -                                                      let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
 +                                                      let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
                                                                if let Some(claimed_htlc_value) = htlc_claim_value_msat {
                                                                        Some(claimed_htlc_value - forwarded_htlc_value)
                                                                } else { None }
                                                        } else { None };
 +                                                      debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
 +                                                              "skimmed_fee_msat must always be included in total_fee_earned_msat");
                                                        Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                                event: events::Event::PaymentForwarded {
 -                                                                      fee_earned_msat,
 -                                                                      claim_from_onchain_tx: from_onchain,
                                                                        prev_channel_id: Some(prev_channel_id),
                                                                        next_channel_id: Some(next_channel_id),
 +                                                                      prev_user_channel_id,
 +                                                                      next_user_channel_id,
 +                                                                      total_fee_earned_msat,
 +                                                                      skimmed_fee_msat,
 +                                                                      claim_from_onchain_tx: from_onchain,
                                                                        outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
                                                                },
                                                                downstream_counterparty_and_funding_outpoint: chan_to_release,
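
`fee_earned_msat` becomes `total_fee_earned_msat` and gains a sibling `skimmed_fee_msat`: the total is simply the claimed inbound value minus the forwarded outbound value, and the skimmed portion must be contained in that total, as the `debug_assert!` above insists. The arithmetic, isolated:

    // Total fee on a forward is claimed inbound value minus forwarded
    // outbound value (both in msat), mirroring the hunk above.
    fn total_fee_earned_msat(claimed_htlc_value: u64, forwarded_htlc_value: u64) -> u64 {
        claimed_htlc_value - forwarded_htlc_value
    }

    fn check_skim(skimmed_fee_msat: Option<u64>, total_fee_earned_msat: Option<u64>) {
        // Option<u64> ordering treats None as less than Some(_), matching the
        // assertion in the real code.
        debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
            "skimmed_fee_msat must always be included in total_fee_earned_msat");
    }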
        fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
                channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
                commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
 -              pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
 +              pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 +              funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
 -      -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
 +      -> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
                let logger = WithChannelContext::from(&self.logger, &channel.context);
 -              log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
 +              log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
                        &channel.context.channel_id(),
                        if raa.is_some() { "an" } else { "no" },
 -                      if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
 +                      if commitment_update.is_some() { "a" } else { "no" },
 +                      pending_forwards.len(), pending_update_adds.len(),
                        if funding_broadcastable.is_some() { "" } else { "not " },
                        if channel_ready.is_some() { "sending" } else { "without" },
                        if announcement_sigs.is_some() { "sending" } else { "without" });
  
 -              let mut htlc_forwards = None;
 -
                let counterparty_node_id = channel.context.get_counterparty_node_id();
 +              let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
 +
 +              let mut htlc_forwards = None;
                if !pending_forwards.is_empty() {
 -                      htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
 -                              channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
 +                      htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(),
 +                              channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
 +              }
 +              let mut decode_update_add_htlcs = None;
 +              if !pending_update_adds.is_empty() {
 +                      decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
                }
  
                if let Some(msg) = channel_ready {
                        emit_channel_ready_event!(pending_events, channel);
                }
  
 -              htlc_forwards
 +              (htlc_forwards, decode_update_add_htlcs)
        }
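
`handle_channel_resumption` now hands back two optional batches instead of one: HTLCs already decoded and ready to forward, plus raw `update_add_htlc`s still awaiting onion decode. A sketch of how a call site might split them (payload aliases are illustrative simplifications of the tuples in the signature above):

    type HtlcForwards = (u64, Vec<u32>);   // (scid, decoded pending forwards)
    type PendingDecodes = (u64, Vec<u32>); // (scid, raw update_add_htlcs)

    fn consume_resumption(res: (Option<HtlcForwards>, Option<PendingDecodes>)) {
        let (htlc_forwards, decode_update_add_htlcs) = res;
        if let Some((scid, forwards)) = htlc_forwards {
            let _ = (scid, forwards); // hand straight to the forwarding pipeline
        }
        if let Some((scid, raw_htlcs)) = decode_update_add_htlcs {
            let _ = (scid, raw_htlcs); // queue for process_pending_update_add_htlcs
        }
    }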
  
        fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
                                // TODO: Once we can rely on the counterparty_node_id from the
                                // monitor event, this and the outpoint_to_peer map should be removed.
                                let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
 -                              match outpoint_to_peer.get(&funding_txo) {
 +                              match outpoint_to_peer.get(funding_txo) {
                                        Some(cp_id) => cp_id.clone(),
                                        None => return,
                                }
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
 -                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
 +                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
                // happening and return an error. N.B. that we create the channel with an outbound SCID of zero so
                // that we can delay allocating the SCID until after we're sure that the checks below will
                // succeed.
 -              let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
 +              let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
                        Some(unaccepted_channel) => {
 -                              let best_block_height = self.best_block.read().unwrap().height();
 +                              let best_block_height = self.best_block.read().unwrap().height;
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
 -                                      &self.logger, accept_0conf).map_err(|e| {
 -                                              let err_str = e.to_string();
 -                                              log_error!(logger, "{}", err_str);
 -
 -                                              APIError::ChannelUnavailable { err: err_str }
 -                                      })
 -                              }
 +                                      &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id))
 +                      },
                        _ => {
                                let err_str = "No such channel awaiting to be accepted.".to_owned();
                                log_error!(logger, "{}", err_str);
  
 -                              Err(APIError::APIMisuseError { err: err_str })
 +                              return Err(APIError::APIMisuseError { err: err_str });
                        }
 -              }?;
 +              };
  
 -              if accept_0conf {
 -                      // This should have been correctly configured by the call to InboundV1Channel::new.
 -                      debug_assert!(channel.context.minimum_depth().unwrap() == 0);
 -              } else if channel.context.get_channel_type().requires_zero_conf() {
 -                      let send_msg_err_event = events::MessageSendEvent::HandleError {
 -                              node_id: channel.context.get_counterparty_node_id(),
 -                              action: msgs::ErrorAction::SendErrorMessage{
 -                                      msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
 +              match res {
 +                      Err(err) => {
 +                              mem::drop(peer_state_lock);
 +                              mem::drop(per_peer_state);
 +                              match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
 +                                      Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
 +                                      Err(e) => {
 +                                              return Err(APIError::ChannelUnavailable { err: e.err });
 +                                      },
                                }
 -                      };
 -                      peer_state.pending_msg_events.push(send_msg_err_event);
 -                      let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
 -                      log_error!(logger, "{}", err_str);
 +                      }
 +                      Ok(mut channel) => {
 +                              if accept_0conf {
 +                                      // This should have been correctly configured by the call to InboundV1Channel::new.
 +                                      debug_assert!(channel.context.minimum_depth().unwrap() == 0);
 +                              } else if channel.context.get_channel_type().requires_zero_conf() {
 +                                      let send_msg_err_event = events::MessageSendEvent::HandleError {
 +                                              node_id: channel.context.get_counterparty_node_id(),
 +                                              action: msgs::ErrorAction::SendErrorMessage{
 +                                                      msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
 +                                              }
 +                                      };
 +                                      peer_state.pending_msg_events.push(send_msg_err_event);
 +                                      let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
 +                                      log_error!(logger, "{}", err_str);
  
 -                      return Err(APIError::APIMisuseError { err: err_str });
 -              } else {
 -                      // If this peer already has some channels, a new channel won't increase our number of peers
 -                      // with unfunded channels, so as long as we aren't over the maximum number of unfunded
 -                      // channels per-peer we can accept channels from a peer with existing ones.
 -                      if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
 -                              let send_msg_err_event = events::MessageSendEvent::HandleError {
 -                                      node_id: channel.context.get_counterparty_node_id(),
 -                                      action: msgs::ErrorAction::SendErrorMessage{
 -                                              msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
 -                                      }
 -                              };
 -                              peer_state.pending_msg_events.push(send_msg_err_event);
 -                              let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
 -                              log_error!(logger, "{}", err_str);
 +                                      return Err(APIError::APIMisuseError { err: err_str });
 +                              } else {
 +                                      // If this peer already has some channels, a new channel won't increase our number of peers
 +                                      // with unfunded channels, so as long as we aren't over the maximum number of unfunded
 +                                      // channels per-peer we can accept channels from a peer with existing ones.
 +                                      if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
 +                                              let send_msg_err_event = events::MessageSendEvent::HandleError {
 +                                                      node_id: channel.context.get_counterparty_node_id(),
 +                                                      action: msgs::ErrorAction::SendErrorMessage{
 +                                                              msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
 +                                                      }
 +                                              };
 +                                              peer_state.pending_msg_events.push(send_msg_err_event);
 +                                              let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
 +                                              log_error!(logger, "{}", err_str);
  
 -                              return Err(APIError::APIMisuseError { err: err_str });
 -                      }
 -              }
 +                                              return Err(APIError::APIMisuseError { err: err_str });
 +                                      }
 +                              }
  
 -              // Now that we know we have a channel, assign an outbound SCID alias.
 -              let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 -              channel.context.set_outbound_scid_alias(outbound_scid_alias);
 +                              // Now that we know we have a channel, assign an outbound SCID alias.
 +                              let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 +                              channel.context.set_outbound_scid_alias(outbound_scid_alias);
  
 -              peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
 -                      node_id: channel.context.get_counterparty_node_id(),
 -                      msg: channel.accept_inbound_channel(),
 -              });
 +                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
 +                                      node_id: channel.context.get_counterparty_node_id(),
 +                                      msg: channel.accept_inbound_channel(),
 +                              });
  
 -              peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
 +                              peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
  
 -              Ok(())
 +                              Ok(())
 +                      },
 +              }
        }
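
The restructuring above routes the `InboundV1Channel::new` failure through `handle_error!`, which needs to re-take the per-peer state in order to queue an error message for the counterparty; hence the explicit `mem::drop`s before calling it. The lock discipline in miniature, with illustrative types:

    use std::sync::Mutex;

    pub struct Node {
        peer_msg_events: Mutex<Vec<String>>,
    }

    impl Node {
        fn handle_error(&self, err: String) -> String {
            // Re-acquires peer state internally, so callers must not hold it.
            self.peer_msg_events.lock().unwrap().push(err.clone());
            err
        }

        pub fn accept_channel(&self) -> Result<(), String> {
            let peer_state = self.peer_msg_events.lock().unwrap();
            let res: Result<(), String> = Err("channel rejected".to_owned());
            if let Err(err) = res {
                drop(peer_state); // as above: release the locks first...
                return Err(self.handle_error(err)); // ...then surface the error
            }
            Ok(())
        }
    }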
  
        /// Gets the number of peers which match the given filter and do not have any funded, outbound,
        fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
        where Filter: Fn(&PeerState<SP>) -> bool {
                let mut peers_without_funded_channels = 0;
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                {
                        let peer_state_lock = self.per_peer_state.read().unwrap();
                        for (_, peer_mtx) in peer_state_lock.iter() {
                                                num_unfunded_channels += 1;
                                        }
                                },
 +                              // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
 +                              #[cfg(dual_funding)]
 +                              ChannelPhase::UnfundedInboundV2(chan) => {
 +                                      // Only inbound V2 channels that are not 0conf and that we do not contribute to will be
 +                                      // included in the unfunded count.
 +                                      if chan.context.minimum_depth().unwrap_or(1) != 0 &&
 +                                              chan.dual_funding_context.our_funding_satoshis == 0 {
 +                                              num_unfunded_channels += 1;
 +                                      }
 +                              },
                                ChannelPhase::UnfundedOutboundV1(_) => {
                                        // Outbound channels don't contribute to the unfunded count in the DoS context.
                                        continue;
 +                              },
 +                              // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
 +                              #[cfg(dual_funding)]
 +                              ChannelPhase::UnfundedOutboundV2(_) => {
 +                                      // Outbound channels don't contribute to the unfunded count in the DoS context.
 +                                      continue;
                                }
                        }
                }
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
                // likely to be lost on restart!
 -              if msg.chain_hash != self.chain_hash {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
 +              if msg.common_fields.chain_hash != self.chain_hash {
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
 +                               msg.common_fields.temporary_channel_id.clone()));
                }
  
                if !self.default_configuration.accept_inbound_channels {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
 +                               msg.common_fields.temporary_channel_id.clone()));
                }
  
                // Get the number of peers with channels, but without funded ones. We don't care too much
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                    .ok_or_else(|| {
                                debug_assert!(false);
 -                              MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
 +                              MsgHandleErrInternal::send_err_msg_no_close(
 +                                      format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
 +                                      msg.common_fields.temporary_channel_id.clone())
                        })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
 -                              msg.temporary_channel_id.clone()));
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
 -                              msg.temporary_channel_id.clone()));
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
 -              let channel_id = msg.temporary_channel_id;
 +              let channel_id = msg.common_fields.temporary_channel_id;
                let channel_exists = peer_state.has_channel(&channel_id);
                if channel_exists {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                              "temporary_channel_id collision for the same peer!".to_owned(),
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
                // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
                if self.default_configuration.manually_accept_inbound_channels {
                        let channel_type = channel::channel_type_from_open_channel(
 -                                      &msg, &peer_state.latest_features, &self.channel_type_features()
 +                                      &msg.common_fields, &peer_state.latest_features, &self.channel_type_features()
                                ).map_err(|e|
 -                                      MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
 +                                      MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)
                                )?;
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push_back((events::Event::OpenChannelRequest {
 -                              temporary_channel_id: msg.temporary_channel_id.clone(),
 +                              temporary_channel_id: msg.common_fields.temporary_channel_id.clone(),
                                counterparty_node_id: counterparty_node_id.clone(),
 -                              funding_satoshis: msg.funding_satoshis,
 +                              funding_satoshis: msg.common_fields.funding_satoshis,
                                push_msat: msg.push_msat,
                                channel_type,
                        }, None));
                        &self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false)
                {
                        Err(e) => {
 -                              return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
 +                              return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id));
                        },
                        Ok(res) => res
                };
  
                let channel_type = channel.context.get_channel_type();
                if channel_type.requires_zero_conf() {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                              "No zero confirmation channels accepted".to_owned(),
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
                if channel_type.requires_anchors_zero_fee_htlc_tx() {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                              "No channels with anchor outputs accepted".to_owned(),
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
                let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                        debug_assert!(false);
 -                                      MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
 +                                      MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
 -                      match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
 +                      match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut phase) => {
                                        match phase.get_mut() {
                                                ChannelPhase::UnfundedOutboundV1(chan) => {
                                                        (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
                                                },
                                                _ => {
 -                                                      return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
 +                                                      return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
                                                }
                                        }
                                },
 -                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
                pending_events.push_back((events::Event::FundingGenerationReady {
 -                      temporary_channel_id: msg.temporary_channel_id,
 +                      temporary_channel_id: msg.common_fields.temporary_channel_id,
                        counterparty_node_id: *counterparty_node_id,
                        channel_value_satoshis: value,
                        output_script,
                                                }
                                                // Update the monitor with the shutdown script if necessary.
                                                if let Some(monitor_update) = monitor_update_opt {
 -                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update,
 +                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                }
                                        },
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
                                                finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
                                        },
 +                                      // TODO(dual_funding): Combine this match arm with above.
 +                                      #[cfg(dual_funding)]
 +                                      ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
 +                                              let context = phase.context_mut();
 +                                              let logger = WithChannelContext::from(&self.logger, context);
 +                                              log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
 +                                              let mut chan = remove_channel_phase!(self, chan_phase_entry);
 +                                              finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
 +                                      },
                                }
                        } else {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                if let Some(ChannelPhase::Funded(chan)) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
                                });
                        }
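
This hunk is the core behavioral change: a `BroadcastChannelUpdate` is no longer pushed into a single peer's `pending_msg_events`, where it could be drained and dropped while no peers were connected, but into a node-wide queue. A pared-down sketch of the queueing side, with stand-in types rather than LDK's exact definitions:

    use std::sync::Mutex;

    // Stand-in for LDK's MessageSendEvent.
    enum MessageSendEvent { BroadcastChannelUpdate { msg: String } }

    struct Manager {
        // Node-wide queue: broadcast gossip is no longer owned by any one peer.
        pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
    }

    impl Manager {
        fn queue_channel_update(&self, update: String) {
            // Queueing here instead of in a peer's `pending_msg_events` means
            // the update survives until some peer is connected to receive it.
            self.pending_broadcast_messages.lock().unwrap()
                .push(MessageSendEvent::BroadcastChannelUpdate { msg: update });
        }
    }
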
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 -                                      let pending_forward_info = match decoded_hop_res {
 +                                      let mut pending_forward_info = match decoded_hop_res {
                                                Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
                                                        self.construct_pending_htlc_status(
                                                                msg, counterparty_node_id, shared_secret, next_hop,
                                                        ),
                                                Err(e) => PendingHTLCStatus::Fail(e)
                                        };
 -                                      let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
 +                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 +                                      // If the update_add is completely bogus, the call will Err and we will close,
 +                                      // but if we've sent a shutdown and they haven't acknowledged it yet, we just
 +                                      // want to reject the new HTLC and fail it backwards instead of forwarding.
 +                                      if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
                                                if msg.blinding_point.is_some() {
 -                                                      return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
 -                                                                      msgs::UpdateFailMalformedHTLC {
 -                                                                              channel_id: msg.channel_id,
 -                                                                              htlc_id: msg.htlc_id,
 -                                                                              sha256_of_onion: [0; 32],
 -                                                                              failure_code: INVALID_ONION_BLINDING,
 -                                                                      }
 -                                                      ))
 -                                              }
 -                                              // If the update_add is completely bogus, the call will Err and we will close,
 -                                              // but if we've sent a shutdown and they haven't acknowledged it yet, we just
 -                                              // want to reject the new HTLC and fail it backwards instead of forwarding.
 -                                              match pending_forward_info {
 -                                                      PendingHTLCStatus::Forward(PendingHTLCInfo {
 -                                                              ref incoming_shared_secret, ref routing, ..
 -                                                      }) => {
 -                                                              let reason = if routing.blinded_failure().is_some() {
 -                                                                      HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
 -                                                              } else if (error_code & 0x1000) != 0 {
 -                                                                      let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
 -                                                                      HTLCFailReason::reason(real_code, error_data)
 -                                                              } else {
 -                                                                      HTLCFailReason::from_failure_code(error_code)
 -                                                              }.get_encrypted_failure_packet(incoming_shared_secret, &None);
 -                                                              let msg = msgs::UpdateFailHTLC {
 +                                                      pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
 +                                                              msgs::UpdateFailMalformedHTLC {
                                                                        channel_id: msg.channel_id,
                                                                        htlc_id: msg.htlc_id,
 -                                                                      reason
 -                                                              };
 -                                                              PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
 -                                                      },
 -                                                      _ => pending_forward_info
 +                                                                      sha256_of_onion: [0; 32],
 +                                                                      failure_code: INVALID_ONION_BLINDING,
 +                                                              }
 +                                                      ))
 +                                              } else {
 +                                                      match pending_forward_info {
 +                                                              PendingHTLCStatus::Forward(PendingHTLCInfo {
 +                                                                      ref incoming_shared_secret, ref routing, ..
 +                                                              }) => {
 +                                                                      let reason = if routing.blinded_failure().is_some() {
 +                                                                              HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
 +                                                                      } else if (error_code & 0x1000) != 0 {
 +                                                                              let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
 +                                                                              HTLCFailReason::reason(real_code, error_data)
 +                                                                      } else {
 +                                                                              HTLCFailReason::from_failure_code(error_code)
 +                                                                      }.get_encrypted_failure_packet(incoming_shared_secret, &None);
 +                                                                      let msg = msgs::UpdateFailHTLC {
 +                                                                              channel_id: msg.channel_id,
 +                                                                              htlc_id: msg.htlc_id,
 +                                                                              reason
 +                                                                      };
 +                                                                      pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
 +                                                              },
 +                                                              _ => {},
 +                                                      }
                                                }
 -                                      };
 -                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 -                                      try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
 +                                      }
 +                                      try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info), chan_phase_entry);
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
  
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let funding_txo;
 -              let (htlc_source, forwarded_htlc_value) = {
 +              let next_user_channel_id;
 +              let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                                // outbound HTLC is claimed. This is guaranteed to all complete before we
                                                // process the RAA as messages are processed from single peers serially.
                                                funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
 +                                              next_user_channel_id = chan.context.get_user_id();
                                                res
                                        } else {
                                                return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
 -              self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value),
 -                      false, false, Some(*counterparty_node_id), funding_txo, msg.channel_id);
 +              self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
 +                      Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
 +                      funding_txo, msg.channel_id, Some(next_user_channel_id),
 +              );
 +
                Ok(())
        }
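
The fulfill path now captures `skimmed_fee_msat` and the downstream channel's `user_channel_id` under the channel lock and threads them into `claim_funds_internal`, so the eventual `PaymentForwarded` event can report them. A rough sketch of the consuming end, assuming a simplified event shape (LDK's real event carries more fields):

    // Assumed, simplified event shape.
    struct PaymentForwarded {
        next_user_channel_id: Option<u128>,
        skimmed_fee_msat: Option<u64>,
    }

    // The extra values captured above are exactly what a builder like this
    // would need once the claim completes.
    fn build_forward_event(next_user_channel_id: u128, skimmed_fee_msat: Option<u64>) -> PaymentForwarded {
        PaymentForwarded { next_user_channel_id: Some(next_user_channel_id), skimmed_fee_msat }
    }
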
  
                                        let funding_txo = chan.context.get_funding_txo();
                                        let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
                                        if let Some(monitor_update) = monitor_update_opt {
 -                                              handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock,
 +                                              handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
                                                        peer_state, per_peer_state, chan);
                                        }
                                        Ok(())
                }
        }
  
 +      fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
 +              let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
 +              let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
 +              push_forward_event &= decode_update_add_htlcs.is_empty();
 +              let scid = update_add_htlcs.0;
 +              match decode_update_add_htlcs.entry(scid) {
 +                      hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
 +                      hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
 +              }
 +              if push_forward_event { self.push_pending_forwards_ev(); }
 +      }
 +
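
The wakeup rule above is easy to misread: a forwardable event is pushed only when both `forward_htlcs` and `decode_update_add_htlcs` were empty before the insertion, because a non-empty queue implies an event is already pending. The same rule in isolation, with `u32` standing in for HTLCs and a hypothetical `notify_forwardable` in place of `push_pending_forwards_ev`:

    use std::collections::HashMap;
    use std::sync::Mutex;

    struct Forwarder {
        forward_htlcs: Mutex<HashMap<u64, Vec<u32>>>,
        decode_update_add_htlcs: Mutex<HashMap<u64, Vec<u32>>>,
    }

    impl Forwarder {
        fn push_decode(&self, scid: u64, mut htlcs: Vec<u32>) {
            // Fire a wakeup only when *both* queues were empty: anything
            // already queued means an event is pending and a second one
            // would be redundant.
            let mut notify = self.forward_htlcs.lock().unwrap().is_empty();
            let mut decode = self.decode_update_add_htlcs.lock().unwrap();
            notify &= decode.is_empty();
            decode.entry(scid).or_default().append(&mut htlcs);
            if notify { self.notify_forwardable(); }
        }

        fn notify_forwardable(&self) {
            // Stand-in for `push_pending_forwards_ev` (hypothetical here).
        }
    }
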
        #[inline]
        fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
 +              let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
 +              if push_forward_event { self.push_pending_forwards_ev() }
 +      }
 +
 +      #[inline]
 +      fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
 +              let mut push_forward_event = false;
                for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
 -                      let mut push_forward_event = false;
                        let mut new_intercept_events = VecDeque::new();
                        let mut failed_intercept_forwards = Vec::new();
                        if !pending_forwards.is_empty() {
                                        // Pull this now to avoid introducing a lock order with `forward_htlcs`.
                                        let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
  
 +                                      let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
                                        let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
                                        let forward_htlcs_empty = forward_htlcs.is_empty();
                                        match forward_htlcs.entry(scid) {
                                                        } else {
                                                                // We don't want to generate a PendingHTLCsForwardable event if only intercepted
                                                                // payments are being processed.
 -                                                              if forward_htlcs_empty {
 -                                                                      push_forward_event = true;
 -                                                              }
 +                                                              push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
                                                                entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                        prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
                                                        }
                        }
  
                        for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
 -                              self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
 +                              push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
                        }
  
                        if !new_intercept_events.is_empty() {
                                let mut events = self.pending_events.lock().unwrap();
                                events.append(&mut new_intercept_events);
                        }
 -                      if push_forward_event { self.push_pending_forwards_ev() }
                }
 +              push_forward_event
        }
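
Splitting out `forward_htlcs_without_forward_event` moves the event decision out of the per-source loop: each source reports whether a wakeup would be needed, and the caller emits at most one for the whole batch. Schematically, with hypothetical helper names:

    fn forward_batch(sources: &[Vec<u32>]) {
        let mut push_forward_event = false;
        for source in sources {
            // Each worker call reports whether a wakeup would be needed...
            push_forward_event |= forward_one_without_event(source);
        }
        // ...and the batch emits at most one event for all of them.
        if push_forward_event { notify_forwardable(); }
    }

    fn forward_one_without_event(_source: &[u32]) -> bool {
        // Returns true only when the queues were empty before this call.
        false
    }

    fn notify_forwardable() { /* single PendingHTLCsForwardable-style wakeup */ }
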
  
        fn push_pending_forwards_ev(&self) {
                                                if let Some(monitor_update) = monitor_update_opt {
                                                        let funding_txo = funding_txo_opt
                                                                .expect("Funding outpoint must have been set for RAA handling to succeed");
 -                                                      handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update,
 +                                                      handle_new_monitor_update!(self, funding_txo, monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                }
                                                htlcs_to_fail
  
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                msg: try_chan_phase_entry!(self, chan.announcement_signatures(
 -                                                      &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
 +                                                      &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
                                                        msg, &self.default_configuration
                                                ), chan_phase_entry),
                                                // Note that announcement_signatures fails if the channel cannot be announced,
        }
  
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
 -              let htlc_forwards;
                let need_lnd_workaround = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
  
                                                        }
                                                }
                                                let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
 -                                              htlc_forwards = self.handle_channel_resumption(
 +                                              let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
                                                        &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
 -                                                      Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
 +                                                      Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
 +                                              debug_assert!(htlc_forwards.is_none());
 +                                              debug_assert!(decode_update_add_htlcs.is_none());
                                                if let Some(upd) = channel_update {
                                                        peer_state.pending_msg_events.push(upd);
                                                }
                        }
                };
  
 -              let mut persist = NotifyOption::SkipPersistHandleEvents;
 -              if let Some(forwards) = htlc_forwards {
 -                      self.forward_htlcs(&mut [forwards][..]);
 -                      persist = NotifyOption::DoPersist;
 -              }
 -
                if let Some(channel_ready_msg) = need_lnd_workaround {
                        self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
 -              Ok(persist)
 +              Ok(NotifyOption::SkipPersistHandleEvents)
        }
  
        /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
                                                let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
 -                                                      self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, channel_id);
 +                                                      self.claim_funds_internal(htlc_update.source, preimage,
 +                                                              htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
 +                                                              false, counterparty_node_id, funding_outpoint, channel_id, None);
                                                } else {
                                                        log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
                                                        let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
 -                                      MonitorEvent::HolderForceClosed(_funding_outpoint) => {
 +                                      MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
 -                                                                              failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
 +                                                                              let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
 +                                                                                      reason
 +                                                                              } else {
 +                                                                                      ClosureReason::HolderForceClosed
 +                                                                              };
 +                                                                              failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::DisconnectPeer {
 -                                                                                              msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
 +                                                                                              msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
                                                                                        },
                                                                                });
                                                                        }
                                                if let Some(monitor_update) = monitor_opt {
                                                        has_monitor_update = true;
  
 -                                                      handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update,
 +                                                      handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                        continue 'peer_loop;
                                                }
                                                                                // We're done with this channel. We got a closing_signed and sent back
                                                                                // a closing_signed with a closing transaction to broadcast.
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
                        self.finish_close_channel(failure);
                }
        }
 +}
  
 +macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
        /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
        /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
        /// not have an expiration unless otherwise set on the builder.
        /// [`Offer`]: crate::offers::offer::Offer
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        pub fn create_offer_builder(
 -              &self, description: String
 -      ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
 -              let node_id = self.get_our_node_id();
 -              let expanded_key = &self.inbound_payment_key;
 -              let entropy = &*self.entropy_source;
 -              let secp_ctx = &self.secp_ctx;
 -
 -              let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
 +              &$self, description: String
 +      ) -> Result<$builder, Bolt12SemanticError> {
 +              let node_id = $self.get_our_node_id();
 +              let expanded_key = &$self.inbound_payment_key;
 +              let entropy = &*$self.entropy_source;
 +              let secp_ctx = &$self.secp_ctx;
 +
 +              let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = OfferBuilder::deriving_signing_pubkey(
                        description, node_id, expanded_key, entropy, secp_ctx
                )
 -                      .chain_hash(self.chain_hash)
 +                      .chain_hash($self.chain_hash)
                        .path(path);
  
 -              Ok(builder)
 +              Ok(builder.into())
        }
 +} }
  
 +macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
        /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
        /// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
        ///
        /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
        /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
        pub fn create_refund_builder(
 -              &self, description: String, amount_msats: u64, absolute_expiry: Duration,
 +              &$self, description: String, amount_msats: u64, absolute_expiry: Duration,
                payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
 -      ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
 -              let node_id = self.get_our_node_id();
 -              let expanded_key = &self.inbound_payment_key;
 -              let entropy = &*self.entropy_source;
 -              let secp_ctx = &self.secp_ctx;
 +      ) -> Result<$builder, Bolt12SemanticError> {
 +              let node_id = $self.get_our_node_id();
 +              let expanded_key = &$self.inbound_payment_key;
 +              let entropy = &*$self.entropy_source;
 +              let secp_ctx = &$self.secp_ctx;
  
 -              let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
 +              let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = RefundBuilder::deriving_payer_id(
                        description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
                )?
 -                      .chain_hash(self.chain_hash)
 +                      .chain_hash($self.chain_hash)
                        .absolute_expiry(absolute_expiry)
                        .path(path);
  
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
 +
                let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
 -              self.pending_outbound_payments
 +              $self.pending_outbound_payments
                        .add_new_awaiting_invoice(
                                payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
                        )
                        .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
  
 -              Ok(builder)
 +              Ok(builder.into())
        }
 +} }
 +
 +impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
 +where
 +      M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
 +      T::Target: BroadcasterInterface,
 +      ES::Target: EntropySource,
 +      NS::Target: NodeSigner,
 +      SP::Target: SignerProvider,
 +      F::Target: FeeEstimator,
 +      R::Target: Router,
 +      L::Target: Logger,
 +{
 +      #[cfg(not(c_bindings))]
 +      create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
 +      #[cfg(not(c_bindings))]
 +      create_refund_builder!(self, RefundBuilder<secp256k1::All>);
 +
 +      #[cfg(c_bindings)]
 +      create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
 +      #[cfg(c_bindings)]
 +      create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
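
The `create_offer_builder!`/`create_refund_builder!` macros exist so one method body can be instantiated with two return types, since the C bindings need concrete wrapper builders rather than the generic ones. A self-contained sketch of the pattern, with illustrative types in place of LDK's:

    // Illustrative types; LDK's are OfferBuilder, OfferWithDerivedMetadataBuilder, etc.
    struct NativeBuilder;
    struct WrappedBuilder;
    impl From<NativeBuilder> for WrappedBuilder {
        fn from(_: NativeBuilder) -> Self { WrappedBuilder }
    }

    struct Manager;

    macro_rules! create_builder { ($manager:ty, $builder:ty) => {
        impl $manager {
            // One body, two possible return types: `.into()` is the identity
            // conversion for the native builder and the wrapping conversion
            // for the bindings builder.
            pub fn create(&self) -> $builder { NativeBuilder.into() }
        }
    } }

    #[cfg(not(c_bindings))]
    create_builder!(Manager, NativeBuilder);
    #[cfg(c_bindings)]
    create_builder!(Manager, WrappedBuilder);
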
  
        /// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
        /// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
        /// Errors if:
        /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
        /// - the provided parameters are invalid for the offer,
 +      /// - the offer is for an unsupported chain, or
        /// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
        ///   request.
        ///
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
  
 -              let builder = offer
 +              let builder: InvoiceRequestBuilder<DerivedPayerId, secp256k1::All> = offer
                        .request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)?
 -                      .chain_hash(self.chain_hash)?;
 +                      .into();
 +              let builder = builder.chain_hash(self.chain_hash)?;
 +
                let builder = match quantity {
                        None => builder,
                        Some(quantity) => builder.quantity(quantity)?,
                let invoice_request = builder.build_and_sign()?;
                let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
  
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 +
                let expiration = StaleExpiration::TimerTicks(1);
                self.pending_outbound_payments
                        .add_new_awaiting_invoice(
        ///
        /// # Errors
        ///
 -      /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
 -      /// path for the invoice.
 +      /// Errors if:
 +      /// - the refund is for an unsupported chain, or
 +      /// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
 +      ///   the invoice.
        ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
                let amount_msats = refund.amount_msats();
                let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
  
 +              if refund.chain() != self.chain_hash {
 +                      return Err(Bolt12SemanticError::UnsupportedChain);
 +              }
 +
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 +
                match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
                        Ok((payment_hash, payment_secret)) => {
                                let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
                                let builder = refund.respond_using_derived_keys_no_std(
                                        payment_paths, payment_hash, created_at, expanded_key, entropy
                                )?;
 +                              let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
                                let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
                                let reply_path = self.create_blinded_path()
                                        .map_err(|_| Bolt12SemanticError::MissingPaths)?;
        /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
        fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
                let recipient = self.get_our_node_id();
 -              let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
  
                let peers = self.per_peer_state.read().unwrap()
                        .collect::<Vec<_>>();
  
                self.router
 -                      .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
 +                      .create_blinded_paths(recipient, peers, secp_ctx)
                        .and_then(|paths| paths.into_iter().next().ok_or(()))
        }
  
        fn create_blinded_payment_paths(
                &self, amount_msats: u64, payment_secret: PaymentSecret
        ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
 -              let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
  
                let first_hops = self.list_usable_channels();
                let payee_node_id = self.get_our_node_id();
 -              let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
 +              let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
                        + LATENCY_GRACE_PERIOD_BLOCKS;
                let payee_tlvs = ReceiveTlvs {
                        payment_secret,
                        },
                };
                self.router.create_blinded_payment_paths(
 -                      payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
 +                      payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
                )
        }
  
        ///
        /// [phantom node payments]: crate::sign::PhantomKeysManager
        pub fn get_phantom_scid(&self) -> u64 {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
                        let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
        /// Note that this method is not guaranteed to return unique values; you may need to call it a few
        /// times to get a unique scid.
        pub fn get_intercept_scid(&self) -> u64 {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
                        let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
                                                        log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                                channel_id);
 -                                                      handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update,
 +                                                      handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                                peer_state_lck, peer_state, per_peer_state, chan);
                                                        if further_update_exists {
                                                                // If there are more `ChannelMonitorUpdate`s to process, restart at the
@@@ -8551,7 -8213,7 +8555,7 @@@ wher
        /// will randomly be placed first or last in the returned array.
        ///
        /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
-       /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among
+       /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among
        /// the `MessageSendEvent`s to the specific peer they were generated under.
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
                                result = NotifyOption::DoPersist;
                        }
  
+                       let mut is_any_peer_connected = false;
                        let mut pending_events = Vec::new();
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                if peer_state.pending_msg_events.len() > 0 {
                                        pending_events.append(&mut peer_state.pending_msg_events);
                                }
+                               if peer_state.is_connected {
+                                       is_any_peer_connected = true;
+                               }
+                       }
+                       // Only drain broadcast messages once at least one peer is connected to receive them.
+                       if is_any_peer_connected {
+                               let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
+                               pending_events.append(&mut broadcast_msgs);
                        }
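
This is the delivery half of the new queue: broadcast messages are appended to the outgoing batch only once some peer is connected to receive them; otherwise they stay queued for a later pass. Roughly, under simplified types:

    use std::sync::Mutex;

    // `queue` stands in for `pending_broadcast_messages`; `out` for the batch
    // of `MessageSendEvent`s being returned to the message handler.
    fn drain_broadcasts(peer_connected: &[bool], queue: &Mutex<Vec<String>>, out: &mut Vec<String>) {
        if peer_connected.iter().any(|c| *c) {
            // Drained only when deliverable; otherwise the messages survive
            // until a later call finds a connected peer.
            out.append(&mut queue.lock().unwrap());
        }
    }
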
  
                        if !pending_events.is_empty() {
@@@ -8626,9 -8298,9 +8640,9 @@@ wher
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
                {
                        let best_block = self.best_block.read().unwrap();
 -                      assert_eq!(best_block.block_hash(), header.prev_blockhash,
 +                      assert_eq!(best_block.block_hash, header.prev_blockhash,
                                "Blocks must be connected in chain-order - the connected header must build on the last connected header");
 -                      assert_eq!(best_block.height(), height - 1,
 +                      assert_eq!(best_block.height, height - 1,
                                "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
                }
  
                let new_height = height - 1;
                {
                        let mut best_block = self.best_block.write().unwrap();
 -                      assert_eq!(best_block.block_hash(), header.block_hash(),
 +                      assert_eq!(best_block.block_hash, header.block_hash(),
                                "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
 -                      assert_eq!(best_block.height(), height,
 +                      assert_eq!(best_block.height, height,
                                "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
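
These `.block_hash()`/`.height()` → `.block_hash`/`.height` hunks track `BestBlock` exposing public fields instead of accessors. A sketch of the new shape, with the struct body inferred from the call sites above:

    use bitcoin::BlockHash; // as in the surrounding crate

    // Assumed struct body, inferred from the call sites.
    pub struct BestBlock {
        pub block_hash: BlockHash,
        pub height: u32,
    }

    // Call sites change from `best_block.height()` to plain field access:
    fn max_cltv_expiry(best_block: &BestBlock, cltv_far_far_away: u32, grace_blocks: u32) -> u32 {
        best_block.height + cltv_far_far_away + grace_blocks
    }
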
@@@ -8679,7 -8351,7 +8693,7 @@@ wher
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
                        .map(|(a, b)| (a, Vec::new(), b)));
  
 -              let last_best_block_height = self.best_block.read().unwrap().height();
 +              let last_best_block_height = self.best_block.read().unwrap().height;
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
                        self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
@@@ -8783,13 -8455,11 +8797,14 @@@ wher
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                peer_state.channel_by_id.retain(|_, phase| {
                                        match phase {
                                                // Retain unfunded channels.
                                                ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
 +                                              // TODO(dual_funding): Combine this match arm with above.
 +                                              #[cfg(dual_funding)]
 +                                              ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
                                                ChannelPhase::Funded(channel) => {
                                                        let res = f(channel);
                                                        if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                                                let reason_message = format!("{}", reason);
                                                                failed_channels.push(channel.context.force_shutdown(true, reason));
                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
                                                                        });
                                                                }
@@@ -9039,7 -8710,7 +9055,7 @@@ wher
        fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
 -                       msg.temporary_channel_id.clone())), *counterparty_node_id);
 +                       msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
        }
  
        fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
        fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
 -                       msg.temporary_channel_id.clone())), *counterparty_node_id);
 +                       msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
        }
  
        fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
                                                        }
                                                        &mut chan.context
                                                },
 -                                              // Unfunded channels will always be removed.
 -                                              ChannelPhase::UnfundedOutboundV1(chan) => {
 -                                                      &mut chan.context
 +                                              // We retain UnfundedOutboundV1 channels for some time in case the
 +                                              // peer unexpectedly disconnects, and intends to reconnect.
 +                                              ChannelPhase::UnfundedOutboundV1(_) => {
 +                                                      return true;
                                                },
 +                                              // Unfunded inbound channels will always be removed.
                                                ChannelPhase::UnfundedInboundV1(chan) => {
                                                        &mut chan.context
                                                },
 +                                              #[cfg(dual_funding)]
 +                                              ChannelPhase::UnfundedOutboundV2(chan) => {
 +                                                      &mut chan.context
 +                                              },
 +                                              #[cfg(dual_funding)]
 +                                              ChannelPhase::UnfundedInboundV2(chan) => {
 +                                                      &mut chan.context
 +                                              },
                                        };
                                        // Clean up for removal.
                                        update_maps_on_chan_removal!(self, &context);
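
The disconnect path above now distinguishes outbound from inbound unfunded channels: outbound V1
channels are retained so their open_channel can simply be re-sent on reconnect (see the
peer_connected hunk further down), while unfunded inbound channels are dropped as before. A
retained channel is still bounded in lifetime by a per-channel age counter driven by timer ticks;
a hedged sketch of that bound, with the struct and the exact limit assumed rather than taken from
this diff:

    struct UnfundedChannel {
        age_ticks: usize,
    }

    // Assumed limit; LDK enforces an unfunded-channel age cutoff on the order
    // of an hour of one-minute timer ticks.
    const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

    fn retain_on_timer_tick(chan: &mut UnfundedChannel) -> bool {
        chan.age_ticks += 1;
        // Drop the channel once it has sat unfunded too long, even if the
        // peer keeps reconnecting without ever funding it.
        chan.age_ticks <= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
    }

    fn main() {
        let mut chan = UnfundedChannel { age_ticks: 0 };
        for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
            assert!(retain_on_timer_tick(&mut chan));
        }
        assert!(!retain_on_timer_tick(&mut chan));
    }
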
                                                // Gossip
                                                &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
                                                &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
-                                               &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                               // [`ChannelManager::pending_broadcast_messages`] holds the [`BroadcastChannelUpdate`]
+                                               // events; this arm exists only to keep the match exhaustive.
+                                               &events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
+                                                       debug_assert!(false, "This event shouldn't have been here");
+                                                       false
+                                               },
                                                &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                                &events::MessageSendEvent::SendChannelUpdate { .. } => false,
                                                &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                                        return NotifyOption::SkipPersistNoEvents;
                                                }
                                                e.insert(Mutex::new(PeerState {
 -                                                      channel_by_id: HashMap::new(),
 -                                                      inbound_channel_request_by_id: HashMap::new(),
 +                                                      channel_by_id: new_hash_map(),
 +                                                      inbound_channel_request_by_id: new_hash_map(),
                                                        latest_features: init_msg.features.clone(),
                                                        pending_msg_events: Vec::new(),
                                                        in_flight_monitor_updates: BTreeMap::new(),
                                                let mut peer_state = e.get().lock().unwrap();
                                                peer_state.latest_features = init_msg.features.clone();
  
 -                                              let best_block_height = self.best_block.read().unwrap().height();
 +                                              let best_block_height = self.best_block.read().unwrap().height;
                                                if inbound_peer_limited &&
                                                        Self::unfunded_channel_count(&*peer_state, best_block_height) ==
                                                        peer_state.channel_by_id.len()
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
  
 -                              peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
 -                                      if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
 -                              ).for_each(|chan| {
 -                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 -                                      pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
 -                                              node_id: chan.context.get_counterparty_node_id(),
 -                                              msg: chan.get_channel_reestablish(&&logger),
 -                                      });
 -                              });
 +                              for (_, phase) in peer_state.channel_by_id.iter_mut() {
 +                                      match phase {
 +                                              ChannelPhase::Funded(chan) => {
 +                                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
 +                                                              node_id: chan.context.get_counterparty_node_id(),
 +                                                              msg: chan.get_channel_reestablish(&&logger),
 +                                                      });
 +                                              }
 +
 +                                              ChannelPhase::UnfundedOutboundV1(chan) => {
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 +                                                              node_id: chan.context.get_counterparty_node_id(),
 +                                                              msg: chan.get_open_channel(self.chain_hash),
 +                                                      });
 +                                              }
 +
 +                                              // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
 +                                              #[cfg(dual_funding)]
 +                                              ChannelPhase::UnfundedOutboundV2(chan) => {
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
 +                                                              node_id: chan.context.get_counterparty_node_id(),
 +                                                              msg: chan.get_open_channel_v2(self.chain_hash),
 +                                                      });
 +                                              },
 +
 +                                              ChannelPhase::UnfundedInboundV1(_) => {
 +                                                      // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
 +                                                      // they are not persisted and won't be recovered after a crash.
 +                                                      // Therefore, they shouldn't exist at this point.
 +                                                      debug_assert!(false);
 +                                              }
 +
 +                                              // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
 +                                              #[cfg(dual_funding)]
 +                                              ChannelPhase::UnfundedInboundV2(_) => {
 +                                                      // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
 +                                                      // they are not persisted and won't be recovered after a crash.
 +                                                      // Therefore, they shouldn't exist at this point.
 +                                                      debug_assert!(false);
 +                                              },
 +                                      }
 +                              }
                        }
  
                        return NotifyOption::SkipPersistHandleEvents;
        }
  
        fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
 -              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 -
                match &msg.data as &str {
                        "cannot co-op close channel w/ active htlcs"|
                        "link failed to shutdown" =>
                                // We're not going to bother handling this in a sensible way, instead simply
                                // repeating the Shutdown message on repeat until morale improves.
                                if !msg.channel_id.is_zero() {
 -                                      let per_peer_state = self.per_peer_state.read().unwrap();
 -                                      let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 -                                      if peer_state_mutex_opt.is_none() { return; }
 -                                      let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
 -                                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
 -                                              if let Some(msg) = chan.get_outbound_shutdown() {
 -                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 -                                                              node_id: *counterparty_node_id,
 -                                                              msg,
 -                                                      });
 -                                              }
 -                                              peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 -                                                      node_id: *counterparty_node_id,
 -                                                      action: msgs::ErrorAction::SendWarningMessage {
 -                                                              msg: msgs::WarningMessage {
 -                                                                      channel_id: msg.channel_id,
 -                                                                      data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
 -                                                              },
 -                                                              log_level: Level::Trace,
 +                                      PersistenceNotifierGuard::optionally_notify(
 +                                              self,
 +                                              || -> NotifyOption {
 +                                                      let per_peer_state = self.per_peer_state.read().unwrap();
 +                                                      let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 +                                                      if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
 +                                                      let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
 +                                                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
 +                                                              if let Some(msg) = chan.get_outbound_shutdown() {
 +                                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 +                                                                              node_id: *counterparty_node_id,
 +                                                                              msg,
 +                                                                      });
 +                                                              }
 +                                                              peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                                                      node_id: *counterparty_node_id,
 +                                                                      action: msgs::ErrorAction::SendWarningMessage {
 +                                                                              msg: msgs::WarningMessage {
 +                                                                                      channel_id: msg.channel_id,
 +                                                                                      data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
 +                                                                              },
 +                                                                              log_level: Level::Trace,
 +                                                                      }
 +                                                              });
 +                                                              // This can happen in a fairly tight loop, so we absolutely cannot trigger
 +                                                              // a `ChannelManager` write here.
 +                                                              return NotifyOption::SkipPersistHandleEvents;
                                                        }
 -                                              });
 -                                      }
 +                                                      NotifyOption::SkipPersistNoEvents
 +                                              }
 +                                      );
                                }
                                return;
                        }
                        _ => {}
                }
  
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 +
                if msg.channel_id.is_zero() {
                        let channel_ids: Vec<ChannelId> = {
                                let per_peer_state = self.per_peer_state.read().unwrap();
                                if peer_state_mutex_opt.is_none() { return; }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
 -                              if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
 -                                      if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
 -                                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 -                                                      node_id: *counterparty_node_id,
 -                                                      msg,
 -                                              });
 -                                              return;
 -                                      }
 +                              match peer_state.channel_by_id.get_mut(&msg.channel_id) {
 +                                      Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
 +                                              if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
 +                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 +                                                              node_id: *counterparty_node_id,
 +                                                              msg,
 +                                                      });
 +                                                      return;
 +                                              }
 +                                      },
 +                                      #[cfg(dual_funding)]
 +                                      Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
 +                                              if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
 +                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
 +                                                              node_id: *counterparty_node_id,
 +                                                              msg,
 +                                                      });
 +                                                      return;
 +                                              }
 +                                      },
 +                                      None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
 +                                      #[cfg(dual_funding)]
 +                                      Some(ChannelPhase::UnfundedInboundV2(_)) => (),
                                }
                        }
  
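
The rewrite above moves the LND-bug-6039 workaround inside PersistenceNotifierGuard::optionally_notify,
whose closure returns a NotifyOption so this potentially tight resend loop never triggers a full
ChannelManager write. A rough sketch of the closure-returns-NotifyOption idea, with a simplified
notifier (the real guard also drives event processing, elided here):

    use std::sync::atomic::{AtomicBool, Ordering};

    enum NotifyOption {
        DoPersist,
        SkipPersistHandleEvents,
        SkipPersistNoEvents,
    }

    struct Notifier {
        needs_persist: AtomicBool,
    }

    fn optionally_notify<F: FnOnce() -> NotifyOption>(notifier: &Notifier, f: F) {
        match f() {
            // Only this variant marks the manager dirty for the persister.
            NotifyOption::DoPersist => notifier.needs_persist.store(true, Ordering::Release),
            // Events may need processing, but no write is forced...
            NotifyOption::SkipPersistHandleEvents => {},
            // ...and here neither events nor a write.
            NotifyOption::SkipPersistNoEvents => {},
        }
    }

    fn main() {
        let n = Notifier { needs_persist: AtomicBool::new(false) };
        optionally_notify(&n, || NotifyOption::SkipPersistHandleEvents);
        assert!(!n.needs_persist.load(Ordering::Acquire));
    }
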
@@@ -9694,8 -9302,6 +9715,8 @@@ wher
                                        let builder = invoice_request.respond_using_derived_keys_no_std(
                                                payment_paths, payment_hash, created_at
                                        );
 +                                      let builder: Result<InvoiceBuilder<DerivedSigningPubkey>, _> =
 +                                              builder.map(|b| b.into());
                                        match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
                                                Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
                                                Err(error) => Some(OffersMessage::InvoiceError(error.into())),
                                        let builder = invoice_request.respond_with_no_std(
                                                payment_paths, payment_hash, created_at
                                        );
 +                                      let builder: Result<InvoiceBuilder<ExplicitSigningPubkey>, _> =
 +                                              builder.map(|b| b.into());
                                        let response = builder.and_then(|builder| builder.allow_mpp().build())
                                                .map_err(|e| OffersMessage::InvoiceError(e.into()))
 -                                              .and_then(|invoice|
 -                                                      match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
 +                                              .and_then(|invoice| {
 +                                                      #[cfg(c_bindings)]
 +                                                      let mut invoice = invoice;
 +                                                      match invoice.sign(|invoice: &UnsignedBolt12Invoice|
 +                                                              self.node_signer.sign_bolt12_invoice(invoice)
 +                                                      ) {
                                                                Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
 -                                                              Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
 +                                                              Err(SignError::Signing) => Err(OffersMessage::InvoiceError(
                                                                                InvoiceError::from_string("Failed signing invoice".to_string())
                                                                )),
                                                                Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
                                                                                InvoiceError::from_string("Failed invoice signature verification".to_string())
                                                                )),
 -                                                      });
 +                                                      }
 +                                              });
                                        match response {
                                                Ok(invoice) => Some(invoice),
                                                Err(error) => Some(error),
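
The explicit Result<InvoiceBuilder<...>, _> annotations and the added .map(|b| b.into()) calls
exist because, under cfg(c_bindings), the respond_* methods return bindings-friendly builder
types that convert into the core InvoiceBuilder via From; mapping through Into lets both
configurations share the rest of the pipeline. A toy sketch of that shape, with both builder
types hypothetical:

    struct InvoiceBuilderCore;
    struct BindingsInvoiceBuilder;

    impl From<BindingsInvoiceBuilder> for InvoiceBuilderCore {
        fn from(_b: BindingsInvoiceBuilder) -> Self { InvoiceBuilderCore }
    }

    // Mirrors `builder.map(|b| b.into())` in the hunk above.
    fn unify(b: Result<BindingsInvoiceBuilder, ()>) -> Result<InvoiceBuilderCore, ()> {
        b.map(|b| b.into())
    }

    fn main() {
        assert!(unify(Ok(BindingsInvoiceBuilder)).is_ok());
    }
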
@@@ -9876,8 -9475,6 +9897,8 @@@ impl Writeable for ChannelDetails 
                        (37, user_channel_id_high_opt, option),
                        (39, self.feerate_sat_per_1000_weight, option),
                        (41, self.channel_shutdown_state, option),
 +                      (43, self.pending_inbound_htlcs, optional_vec),
 +                      (45, self.pending_outbound_htlcs, optional_vec),
                });
                Ok(())
        }
@@@ -9916,8 -9513,6 +9937,8 @@@ impl Readable for ChannelDetails 
                        (37, user_channel_id_high_opt, option),
                        (39, feerate_sat_per_1000_weight, option),
                        (41, channel_shutdown_state, option),
 +                      (43, pending_inbound_htlcs, optional_vec),
 +                      (45, pending_outbound_htlcs, optional_vec),
                });
  
                // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
                        inbound_htlc_maximum_msat,
                        feerate_sat_per_1000_weight,
                        channel_shutdown_state,
 +                      pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
 +                      pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
                })
        }
  }
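
The two new ChannelDetails fields ride on odd TLV types (43 and 45) via optional_vec, so data
written by older versions reads back as an empty list rather than failing to parse. A minimal
sketch of the read-side fallback, matching the unwrap_or(Vec::new()) calls above:

    fn read_optional_vec<T>(read: Option<Vec<T>>) -> Vec<T> {
        read.unwrap_or(Vec::new())
    }

    fn main() {
        // Pre-upgrade serializations carry no TLV 43/45, so the field is None...
        assert!(read_optional_vec::<u8>(None).is_empty());
        // ...while new serializations round-trip their contents.
        assert_eq!(read_optional_vec(Some(vec![1u8, 2])), vec![1, 2]);
    }
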
@@@ -9987,7 -9580,6 +10008,7 @@@ impl_writeable_tlv_based_enum!(PendingH
        },
        (2, ReceiveKeysend) => {
                (0, payment_preimage, required),
 +              (1, requires_blinded_error, (default_value, false)),
                (2, incoming_cltv_expiry, required),
                (3, payment_metadata, option),
                (4, payment_data, option), // Added in 0.0.116
@@@ -10245,7 -9837,7 +10266,7 @@@ impl_writeable_tlv_based!(PendingAddHTL
        (2, prev_short_channel_id, required),
        (4, prev_htlc_id, required),
        (6, prev_funding_outpoint, required),
 -      // Note that by the time we get past the required read for type 2 above, prev_funding_outpoint will be
 +      // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
        // filled in, so we can safely unwrap it here.
        (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
  });
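
The corrected comment relies on a property of the TLV reader worth making explicit: records are
read in ascending type order, so the default_value expression for type 7 can safely unwrap the
required type-6 field read just before it. A small sketch, with derive_channel_id as a
hypothetical stand-in for ChannelId::v1_from_funding_outpoint:

    struct PendingAddHtlcRead {
        prev_funding_outpoint: Option<u64>, // TLV type 6, required
        prev_channel_id: Option<u64>,       // TLV type 7, defaulted
    }

    fn derive_channel_id(funding_outpoint: u64) -> u64 {
        funding_outpoint.wrapping_add(1)
    }

    fn finish_read(mut r: PendingAddHtlcRead) -> PendingAddHtlcRead {
        if r.prev_channel_id.is_none() {
            // Safe: type 6 is required and was read before type 7's default ran.
            r.prev_channel_id = Some(derive_channel_id(r.prev_funding_outpoint.unwrap()));
        }
        r
    }

    fn main() {
        let r = finish_read(PendingAddHtlcRead {
            prev_funding_outpoint: Some(7),
            prev_channel_id: None,
        });
        assert_eq!(r.prev_channel_id, Some(8));
    }
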
@@@ -10340,8 -9932,8 +10361,8 @@@ wher
                self.chain_hash.write(writer)?;
                {
                        let best_block = self.best_block.read().unwrap();
 -                      best_block.height().write(writer)?;
 -                      best_block.block_hash().write(writer)?;
 +                      best_block.height.write(writer)?;
 +                      best_block.block_hash.write(writer)?;
                }
  
                let mut serializable_peer_count: u64 = 0;
                        }
                }
  
 +              let mut decode_update_add_htlcs_opt = None;
 +              let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
 +              if !decode_update_add_htlcs.is_empty() {
 +                      decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
 +              }
 +
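
Note the write-side guard above: TLV type 14 is only emitted when HTLCs are actually pending
decode. Since 14 is an even type, a reader that does not know it would reject the stream under
the usual it's-OK-to-be-odd rule, so skipping it in the common empty case keeps downgrades to
older versions possible. The same guard, simplified to a Vec:

    fn serializable(pending: &Vec<u64>) -> Option<&Vec<u64>> {
        // Emit the even TLV only when non-empty, as in the hunk above.
        if pending.is_empty() { None } else { Some(pending) }
    }

    fn main() {
        assert!(serializable(&Vec::new()).is_none());
        assert!(serializable(&vec![42]).is_some());
    }
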
                let per_peer_state = self.per_peer_state.write().unwrap();
  
                let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
                }
  
                // Encode without retry info for 0.0.101 compatibility.
 -              let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
 +              let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
                for (id, outbound) in pending_outbound_payments.iter() {
                        match outbound {
                                PendingOutboundPayment::Legacy { session_privs } |
                for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
                        for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
                                if !updates.is_empty() {
 -                                      if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
 +                                      if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
                                        in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
                                }
                        }
                        (10, in_flight_monitor_updates, option),
                        (11, self.probing_cookie_secret, required),
                        (13, htlc_onion_fields, optional_vec),
 +                      (14, decode_update_add_htlcs_opt, option),
                });
  
                Ok(())
@@@ -10715,9 -10300,7 +10736,9 @@@ wher
                        mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
                Self {
                        entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
 -                      channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
 +                      channel_monitors: hash_map_from_iter(
 +                              channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
 +                      ),
                }
        }
  }
                let mut failed_htlcs = Vec::new();
  
                let channel_count: u64 = Readable::read(reader)?;
 -              let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
 -              let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 -              let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 -              let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
                let mut close_background_events = Vec::new();
 -              let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
 +              let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
                for _ in 0..channel_count {
                        let mut channel: Channel<SP> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                                                        by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
 -                                                      let mut by_id_map = HashMap::new();
 +                                                      let mut by_id_map = new_hash_map();
                                                        by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                        entry.insert(by_id_map);
                                                }
  
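
Throughout this impl the bare HashMap/HashSet constructors are swapped for crate helpers
(new_hash_map, new_hash_set, hash_map_with_capacity, hash_map_from_iter, hash_set_from_iter,
hash_set_with_capacity). Plausible shapes for these helpers, assuming their purpose is to
centralize the choice of hasher behind one seam rather than to change behavior:

    use std::collections::{HashMap, HashSet};
    use std::hash::Hash;

    fn new_hash_map<K: Hash + Eq, V>() -> HashMap<K, V> { HashMap::new() }

    fn hash_map_with_capacity<K: Hash + Eq, V>(cap: usize) -> HashMap<K, V> {
        HashMap::with_capacity(cap)
    }

    fn hash_map_from_iter<K: Hash + Eq, V, I: IntoIterator<Item = (K, V)>>(iter: I) -> HashMap<K, V> {
        iter.into_iter().collect()
    }

    fn hash_set_from_iter<K: Hash + Eq, I: IntoIterator<Item = K>>(iter: I) -> HashSet<K> {
        iter.into_iter().collect()
    }

    fn main() {
        let m: HashMap<u32, &str> = hash_map_from_iter([(1, "one")]);
        assert_eq!(m.get(&1), Some(&"one"));
        assert!(hash_set_from_iter([1, 2, 2]).len() == 2);
    }
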
                const MAX_ALLOC_SIZE: usize = 1024 * 64;
                let forward_htlcs_count: u64 = Readable::read(reader)?;
 -              let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
 +              let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
                for _ in 0..forward_htlcs_count {
                        let short_channel_id = Readable::read(reader)?;
                        let pending_forwards_count: u64 = Readable::read(reader)?;
                let peer_state_from_chans = |channel_by_id| {
                        PeerState {
                                channel_by_id,
 -                              inbound_channel_request_by_id: HashMap::new(),
 +                              inbound_channel_request_by_id: new_hash_map(),
                                latest_features: InitFeatures::empty(),
                                pending_msg_events: Vec::new(),
                                in_flight_monitor_updates: BTreeMap::new(),
                };
  
                let peer_count: u64 = Readable::read(reader)?;
 -              let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 +              let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
                for _ in 0..peer_count {
                        let peer_pubkey = Readable::read(reader)?;
 -                      let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
 +                      let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
                        let mut peer_state = peer_state_from_chans(peer_chans);
                        peer_state.latest_features = Readable::read(reader)?;
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
                let highest_seen_timestamp: u32 = Readable::read(reader)?;
  
                let pending_inbound_payment_count: u64 = Readable::read(reader)?;
 -              let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
 +              let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
                for _ in 0..pending_inbound_payment_count {
                        if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
                                return Err(DecodeError::InvalidValue);
  
                let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
                let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
 -                      HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
 +                      hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
                for _ in 0..pending_outbound_payments_count_compat {
                        let session_priv = Readable::read(reader)?;
                        let payment = PendingOutboundPayment::Legacy {
 -                              session_privs: [session_priv].iter().cloned().collect()
 +                              session_privs: hash_set_from_iter([session_priv]),
                        };
                        if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
                                return Err(DecodeError::InvalidValue)
                // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
                let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
                let mut pending_outbound_payments = None;
 -              let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
 +              let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
                let mut probing_cookie_secret: Option<[u8; 32]> = None;
                let mut claimable_htlc_purposes = None;
                let mut claimable_htlc_onion_fields = None;
 -              let mut pending_claiming_payments = Some(HashMap::new());
 +              let mut pending_claiming_payments = Some(new_hash_map());
                let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
                let mut events_override = None;
                let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
 +              let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (2, pending_intercepted_htlcs, option),
                        (10, in_flight_monitor_updates, option),
                        (11, probing_cookie_secret, option),
                        (13, claimable_htlc_onion_fields, optional_vec),
 +                      (14, decode_update_add_htlcs, option),
                });
 +              let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
                if fake_scid_rand_bytes.is_none() {
                        fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
                }
                if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
                        pending_outbound_payments = Some(pending_outbound_payments_compat);
                } else if pending_outbound_payments.is_none() {
 -                      let mut outbounds = HashMap::new();
 +                      let mut outbounds = new_hash_map();
                        for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
                                outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
                        }
                                                }
                                        }
                                        if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
 -                                              // If the channel is ahead of the monitor, return InvalidValue:
 +                                              // If the channel is ahead of the monitor, return DangerousValue:
                                                log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
                                                log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
                                                        chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
                                                log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                                log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
                                                log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
 -                                              return Err(DecodeError::InvalidValue);
 +                                              return Err(DecodeError::DangerousValue);
                                        }
                                } else {
                                        // We shouldn't have persisted (or read) any unfunded channel types so none should have been
                                        // still open, we need to replay any monitor updates that are for closed channels,
                                        // creating the necessary peer_state entries as we go.
                                        let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
 -                                              Mutex::new(peer_state_from_chans(HashMap::new()))
 +                                              Mutex::new(peer_state_from_chans(new_hash_map()))
                                        });
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
                                                                                retry_strategy: None,
                                                                                attempts: PaymentAttempts::new(),
                                                                                payment_params: None,
 -                                                                              session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
 +                                                                              session_privs: hash_set_from_iter([session_priv_bytes]),
                                                                                payment_hash: htlc.payment_hash,
                                                                                payment_secret: None, // only used for retries, and we'll never retry on startup
                                                                                payment_metadata: None, // only used for retries, and we'll never retry on startup
                                                                // still have an entry for this HTLC in `forward_htlcs` or
                                                                // `pending_intercepted_htlcs`, we were apparently not persisted after
                                                                // the monitor was when forwarding the payment.
 +                                                              decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
 +                                                                      update_add_htlcs.retain(|update_add_htlc| {
 +                                                                              let matches = *scid == prev_hop_data.short_channel_id &&
 +                                                                                      update_add_htlc.htlc_id == prev_hop_data.htlc_id;
 +                                                                              if matches {
 +                                                                                      log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
 +                                                                                              &htlc.payment_hash, &monitor.channel_id());
 +                                                                              }
 +                                                                              !matches
 +                                                                      });
 +                                                                      !update_add_htlcs.is_empty()
 +                                                              });
                                                                forward_htlcs.retain(|_, forwards| {
                                                                        forwards.retain(|forward| {
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                        }
                }
  
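
A few hunks up, the stale-ChannelMonitor check switched from DecodeError::InvalidValue to
DecodeError::DangerousValue. The distinction matters to callers: DangerousValue indicates the
serialized ChannelManager is well-formed but newer than a supplied ChannelMonitor, so
deserializing could risk funds, and retrying with the latest monitor data may succeed. A hedged
sketch of caller-side handling, with the enum reduced to the two relevant variants:

    enum DecodeError {
        InvalidValue,
        DangerousValue,
    }

    fn describe(e: &DecodeError) -> &'static str {
        match e {
            // Data is fine; the monitor state supplied alongside it is stale.
            DecodeError::DangerousValue => "stale ChannelMonitor; retry with current monitor data",
            DecodeError::InvalidValue => "serialized data is invalid or corrupt",
        }
    }

    fn main() {
        assert!(describe(&DecodeError::DangerousValue).contains("stale"));
    }
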
 -              if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
 +              if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
                        // If we have pending HTLCs to forward, assume we either dropped a
                        // `PendingHTLCsForwardable` or the user received it but never processed it as they
                        // shut down before the timer hit. Either way, set the time_forwardable to a small
                let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
                let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
  
 -              let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
 +              let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
                if let Some(purposes) = claimable_htlc_purposes {
                        if purposes.len() != claimable_htlcs_list.len() {
                                return Err(DecodeError::InvalidValue);
                        }
                }
  
 -              let mut outbound_scid_aliases = HashSet::new();
 +              let mut outbound_scid_aliases = new_hash_set();
                for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                                                        downstream_counterparty_and_funding_outpoint:
                                                                Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
                                                } = action {
 -                                                      if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
 -                                                              let channel_id = blocked_channel_id;
 +                                                      if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
                                                                log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
 -                                                                      channel_id);
 +                                                                      blocked_channel_id);
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
 -                                                                      .entry(*channel_id)
 +                                                                      .entry(*blocked_channel_id)
                                                                        .or_insert_with(Vec::new).push(blocking_action.clone());
                                                        } else {
                                                                // If the channel we were blocking has closed, we don't need to
                        pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
  
                        forward_htlcs: Mutex::new(forward_htlcs),
 +                      decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        outpoint_to_peer: Mutex::new(outpoint_to_peer),
  
                        pending_offers_messages: Mutex::new(Vec::new()),
  
+                       pending_broadcast_messages: Mutex::new(Vec::new()),
                        entropy_source: args.entropy_source,
                        node_signer: args.node_signer,
                        signer_provider: args.signer_provider,
                        // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
                        // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
                        // channel is closed we just assume that it probably came from an on-chain claim.
 -                      channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
 -                              downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
 +                      channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
 +                              downstream_closed, true, downstream_node_id, downstream_funding,
 +                              downstream_channel_id, None
 +                      );
                }
  
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@@ -12133,6 -11701,61 +12156,61 @@@ mod tests 
                }
        }
  
+       #[test]
+       fn test_channel_update_cached() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+               // Confirm that the channel_update was not sent immediately to nodes[1] but was cached.
+               let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
+               assert_eq!(node_1_events.len(), 0);
+               {
+                       // Assert that the ChannelUpdate message has been added to nodes[0]'s pending broadcast messages
+                       let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+                       assert_eq!(pending_broadcast_messages.len(), 1);
+               }
+               // Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+               nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(node_0_events.len(), 0);
+               // Now we reconnect to a peer
+               nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
+               // Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
+               let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(node_0_events.len(), 1);
+               match &node_0_events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => (),
+                       _ => panic!("Unexpected event"),
+               }
+               {
+                       // Assert that the ChannelUpdate message has been cleared from nodes[0]'s pending broadcast messages
+                       let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+                       assert_eq!(pending_broadcast_messages.len(), 0);
+               }
+       }
+
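The new test above can typically be run on its own with a filter such as
`cargo test -p lightning test_channel_update_cached` (the exact invocation depends on the
workspace layout); it walks through queueing on force-close, holding the update while no peer
is connected, and draining it on reconnect.
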
        #[test]
        fn test_drop_disconnected_peers_when_removing_channels() {
                let chanmon_cfgs = create_chanmon_cfgs(2);
                }
                let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
  
 -              check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
 -              check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
 +              check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
        }
  
        fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
                                check_added_monitors!(nodes[0], 1);
                                expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
                        }
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
  
                // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
 -              open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +              open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
 +                      &nodes[0].keys_manager);
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // Further, because all of our channels with nodes[0] are inbound, and none of them funded,
                // it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
                for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
                        nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // Of course, however, outbound channels are always allowed
                nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
                for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
                        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
  
                // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
                // rejected.
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // but we can still open an outbound channel.
                nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
                // but even with such an outbound channel, additional inbound channels will still fail.
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
        }
  
        #[test]
                                _ => panic!("Unexpected event"),
                        }
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
  
                // If we try to accept a channel from another peer non-0conf, it will fail.
                        _ => panic!("Unexpected event"),
                }
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // ...however, if we accept the same channel 0conf, it should work just fine.
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                };
                // Check that if the amount we received + the penultimate hop extra fee is less than the sender
                // intended amount, we fail the payment.
 -              let current_height: u32 = node[0].node.best_block.read().unwrap().height();
 +              let current_height: u32 = node[0].node.best_block.read().unwrap().height;
                if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
                        create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
                                sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
                        }),
                        custom_tlvs: Vec::new(),
                };
 -              let current_height: u32 = node[0].node.best_block.read().unwrap().height();
 +              let current_height: u32 = node[0].node.best_block.read().unwrap().height;
                assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
                        sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
                        current_height, node[0].node.default_configuration.accept_mpp_keysend).is_ok());
                let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
                let node = create_network(1, &node_cfg, &node_chanmgr);
  
 -              let current_height: u32 = node[0].node.best_block.read().unwrap().height();
 +              let current_height: u32 = node[0].node.best_block.read().unwrap().height;
                let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
                        sender_intended_htlc_amt_msat: 100,
                        cltv_expiry_height: 22,
  
                nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -              assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
 +              assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
  
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                let events = nodes[1].node.get_and_clear_pending_events();
                nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
  
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -              assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
 +              assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
  
                // Since nodes[1] should not have accepted the channel, it should
                // not have generated any events.
  
  
                let (scid_1, scid_2) = (42, 43);
 -              let mut forward_htlcs = HashMap::new();
 +              let mut forward_htlcs = new_hash_map();
                forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
                forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
  
@@@ -13107,7 -12729,7 +13185,7 @@@ pub mod bench 
  
                assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
  
 -              let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
 +              let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
  
index 7a3619872bc5f9aa061bd59815be1ea4960294b8,fa1b1e7905f10e6e9dfb3c232f8f4639881f24dc..3a506b57fe2a05b572b7f4c34b2ecc1f617b4f38
@@@ -487,38 -487,16 +487,38 @@@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> 
        /// `release_commitment_secret` are affected by this setting.
        #[cfg(test)]
        pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) {
 +              use crate::sign::ChannelSigner;
 +              log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available);
 +
                let per_peer_state = self.node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap();
 -              let signer = (|| {
 -                      match chan_lock.channel_by_id.get(chan_id) {
 -                              Some(phase) => phase.context().get_signer(),
 -                              None => panic!("Couldn't find a channel with id {}", chan_id),
 +
 +              let mut channel_keys_id = None;
 +              if let Some(chan) = chan_lock.channel_by_id.get(chan_id).map(|phase| phase.context()) {
 +                      chan.get_signer().as_ecdsa().unwrap().set_available(available);
 +                      channel_keys_id = Some(chan.channel_keys_id);
 +              }
 +
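 +              // The ChannelMonitor holds its own copy of the signer, so find the monitor for this
 +              // channel and flip its signer's availability as well.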
 +              let mut monitor = None;
 +              for (funding_txo, channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
 +                      if *chan_id == channel_id {
 +                              monitor = self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok();
                        }
 -              })();
 -              log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available);
 -              signer.as_ecdsa().unwrap().set_available(available);
 +              }
 +              if let Some(monitor) = monitor {
 +                      monitor.do_signer_call(|signer| {
 +                              channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id()));
 +                              signer.set_available(available)
 +                      });
 +              }
 +
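 +              // Finally, record the availability in the keys manager so that signers handed out
 +              // later for this `channel_keys_id` start in the same state.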
 +              if available {
 +                      self.keys_manager.unavailable_signers.lock().unwrap()
 +                              .remove(channel_keys_id.as_ref().unwrap());
 +              } else {
 +                      self.keys_manager.unavailable_signers.lock().unwrap()
 +                              .insert(channel_keys_id.unwrap());
 +              }
        }
  }
  
@@@ -640,7 -618,7 +640,7 @@@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 
                        // Before using all the new monitors to check the watch outpoints, use the full set of
                        // them to ensure we can write and reload our ChannelManager.
                        {
 -                              let mut channel_monitors = HashMap::new();
 +                              let mut channel_monitors = new_hash_map();
                                for monitor in deserialized_monitors.iter_mut() {
                                        channel_monitors.insert(monitor.get_funding_txo().0, monitor);
                                }
@@@ -1071,7 -1049,7 +1071,7 @@@ pub fn _reload_node<'a, 'b, 'c>(node: &
  
        let mut node_read = &chanman_encoded[..];
        let (_, node_deserialized) = {
 -              let mut channel_monitors = HashMap::new();
 +              let mut channel_monitors = new_hash_map();
                for monitor in monitors_read.iter_mut() {
                        assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
                }
@@@ -1225,7 -1203,7 +1225,7 @@@ pub fn open_zero_conf_channel<'a, 'b, '
        };
  
        let accept_channel = get_event_msg!(receiver, MessageSendEvent::SendAcceptChannel, initiator.node.get_our_node_id());
 -      assert_eq!(accept_channel.minimum_depth, 0);
 +      assert_eq!(accept_channel.common_fields.minimum_depth, 0);
        initiator.node.handle_accept_channel(&receiver.node.get_our_node_id(), &accept_channel);
  
        let (temporary_channel_id, tx, _) = create_funding_transaction(&initiator, &receiver.node.get_our_node_id(), 100_000, 42);
  pub fn exchange_open_accept_chan<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> ChannelId {
        let create_chan_id = node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None).unwrap();
        let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id());
 -      assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
 +      assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
        assert_eq!(node_a.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 42);
        node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &open_channel_msg);
        if node_b.node.get_current_default_configuration().manually_accept_inbound_channels {
                };
        }
        let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id());
 -      assert_eq!(accept_channel_msg.temporary_channel_id, create_chan_id);
 +      assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id);
        node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_channel_msg);
        assert_ne!(node_b.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 0);
  
@@@ -1553,11 -1531,29 +1553,29 @@@ macro_rules! check_warn_msg 
        }}
  }
  
+ /// Checks if at least one peer is connected.
+ fn is_any_peer_connected(node: &Node) -> bool {
+       let peer_state = node.node.per_peer_state.read().unwrap();
+       for (_, peer_mutex) in peer_state.iter() {
+               let peer = peer_mutex.lock().unwrap();
+               if peer.is_connected { return true; }
+       }
+       false
+ }
  /// Check that a channel's closing channel update has been broadcasted, and optionally
  /// check whether an error message event has occurred.
  pub fn check_closed_broadcast(node: &Node, num_channels: usize, with_error_msg: bool) -> Vec<msgs::ErrorMessage> {
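+       // Channel updates are only broadcast once at least one peer is connected, so temporarily
+       // connect a dummy peer if none is currently connected.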
+       let mut dummy_connected = false;
+       if !is_any_peer_connected(node) {
+               connect_dummy_node(&node);
+               dummy_connected = true;
+       }
        let msg_events = node.node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), if with_error_msg { num_channels * 2 } else { num_channels });
+       if dummy_connected {
+               disconnect_dummy_node(&node);
+       }
        msg_events.into_iter().filter_map(|msg_event| {
                match msg_event {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
@@@ -1827,9 -1823,14 +1845,9 @@@ macro_rules! expect_htlc_handling_faile
  /// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCDestination`] is included in the
  /// `expected_failures` set.
  pub fn expect_pending_htlcs_forwardable_conditions(events: Vec<Event>, expected_failures: &[HTLCDestination]) {
 -      match events[0] {
 -              Event::PendingHTLCsForwardable { .. } => { },
 -              _ => panic!("Unexpected event {:?}", events),
 -      };
 -
        let count = expected_failures.len() + 1;
        assert_eq!(events.len(), count);
 -
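 +      // The `PendingHTLCsForwardable` event is no longer guaranteed to come first, so search for
 +      // it rather than matching on the first event.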
 +      assert!(events.iter().find(|event| matches!(event, Event::PendingHTLCsForwardable { .. })).is_some());
        if expected_failures.len() > 0 {
                expect_htlc_handling_failed_destinations!(events, expected_failures)
        }
@@@ -2218,74 -2219,38 +2236,74 @@@ macro_rules! expect_payment_path_succes
        }
  }
  
 +/// Returns the total fee earned by this HTLC forward, in msat.
  pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
        event: Event, node: &H, prev_node: &H, next_node: &H, expected_fee: Option<u64>,
 -      upstream_force_closed: bool, downstream_force_closed: bool
 -) {
 +      expected_extra_fees_msat: Option<u64>, upstream_force_closed: bool,
 +      downstream_force_closed: bool, allow_1_msat_fee_overpay: bool,
 +) -> Option<u64> {
        match event {
                Event::PaymentForwarded {
 -                      fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
 -                      outbound_amount_forwarded_msat: _
 +                      prev_channel_id, next_channel_id, prev_user_channel_id, next_user_channel_id,
 +                      total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, ..
                } => {
 -                      assert_eq!(fee_earned_msat, expected_fee);
 +                      if allow_1_msat_fee_overpay {
 +                              // Aggregating fees for blinded paths may result in a rounding error, causing slight
 +                              // overpayment in fees.
 +                              let actual_fee = total_fee_earned_msat.unwrap();
 +                              let expected_fee = expected_fee.unwrap();
 +                              assert!(actual_fee == expected_fee || actual_fee == expected_fee + 1);
 +                      } else {
 +                              assert_eq!(total_fee_earned_msat, expected_fee);
 +                      }
 +
 +                      // Check that the (knowingly) withheld amount is always less than or equal to the
 +                      // expected overpaid amount.
 +                      assert!(skimmed_fee_msat == expected_extra_fees_msat);
                        if !upstream_force_closed {
                                // Is the event prev_channel_id in one of the channels between the two nodes?
 -                              assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == prev_node.node().get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
 +                              assert!(node.node().list_channels().iter().any(|x|
 +                                      x.counterparty.node_id == prev_node.node().get_our_node_id() &&
 +                                      x.channel_id == prev_channel_id.unwrap() &&
 +                                      x.user_channel_id == prev_user_channel_id.unwrap()
 +                              ));
                        }
                        // We check for force closures since a force closed channel is removed from the
                        // node's channel list
                        if !downstream_force_closed {
 -                              assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == next_node.node().get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
 +                              // As documented, `next_user_channel_id` will only be `Some` if we didn't settle via an
 +                              // onchain transaction, just like the `total_fee_earned_msat` field. Rather than
 +                              // introducing yet another variable, we use the latter's state as a flag and only
 +                              // check `next_user_channel_id` when `total_fee_earned_msat` is `Some`.
 +                              if total_fee_earned_msat.is_none() {
 +                                      assert!(node.node().list_channels().iter().any(|x|
 +                                              x.counterparty.node_id == next_node.node().get_our_node_id() &&
 +                                              x.channel_id == next_channel_id.unwrap()
 +                                      ));
 +                              } else {
 +                                      assert!(node.node().list_channels().iter().any(|x|
 +                                              x.counterparty.node_id == next_node.node().get_our_node_id() &&
 +                                              x.channel_id == next_channel_id.unwrap() &&
 +                                              x.user_channel_id == next_user_channel_id.unwrap()
 +                                      ));
 +                              }
                        }
                        assert_eq!(claim_from_onchain_tx, downstream_force_closed);
 +                      total_fee_earned_msat
                },
                _ => panic!("Unexpected event"),
        }
  }
  
 +#[macro_export]
  macro_rules! expect_payment_forwarded {
        ($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
                let mut events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                $crate::ln::functional_test_utils::expect_payment_forwarded(
 -                      events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee,
 -                      $upstream_force_closed, $downstream_force_closed);
 +                      events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee, None,
 +                      $upstream_force_closed, $downstream_force_closed, false
 +              );
        }
  }
  
@@@ -2496,65 -2461,7 +2514,65 @@@ fn fail_payment_along_path<'a, 'b, 'c>(
        }
  }
  
 -pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_claimable_expected: bool, clear_recipient_events: bool, expected_preimage: Option<PaymentPreimage>, is_probe: bool) -> Option<Event> {
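 +/// Arguments to [`do_pass_along_path`], bundled into a struct whose optional behavior is set via
 +/// the builder-style helpers below.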
 +pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> {
 +      pub origin_node: &'a Node<'b, 'c, 'd>,
 +      pub expected_path: &'a [&'a Node<'b, 'c, 'd>],
 +      pub recv_value: u64,
 +      pub payment_hash: PaymentHash,
 +      pub payment_secret: Option<PaymentSecret>,
 +      pub event: MessageSendEvent,
 +      pub payment_claimable_expected: bool,
 +      pub clear_recipient_events: bool,
 +      pub expected_preimage: Option<PaymentPreimage>,
 +      pub is_probe: bool,
 +      pub custom_tlvs: Vec<(u64, Vec<u8>)>,
 +}
 +
 +impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> {
 +      pub fn new(
 +              origin_node: &'a Node<'b, 'c, 'd>, expected_path: &'a [&'a Node<'b, 'c, 'd>], recv_value: u64,
 +              payment_hash: PaymentHash, event: MessageSendEvent,
 +      ) -> Self {
 +              Self {
 +                      origin_node, expected_path, recv_value, payment_hash, payment_secret: None, event,
 +                      payment_claimable_expected: true, clear_recipient_events: true, expected_preimage: None,
 +                      is_probe: false, custom_tlvs: Vec::new(),
 +              }
 +      }
 +      pub fn without_clearing_recipient_events(mut self) -> Self {
 +              self.clear_recipient_events = false;
 +              self
 +      }
 +      pub fn is_probe(mut self) -> Self {
 +              self.payment_claimable_expected = false;
 +              self.is_probe = true;
 +              self
 +      }
 +      pub fn without_claimable_event(mut self) -> Self {
 +              self.payment_claimable_expected = false;
 +              self
 +      }
 +      pub fn with_payment_secret(mut self, payment_secret: PaymentSecret) -> Self {
 +              self.payment_secret = Some(payment_secret);
 +              self
 +      }
 +      pub fn with_payment_preimage(mut self, payment_preimage: PaymentPreimage) -> Self {
 +              self.expected_preimage = Some(payment_preimage);
 +              self
 +      }
 +      pub fn with_custom_tlvs(mut self, custom_tlvs: Vec<(u64, Vec<u8>)>) -> Self {
 +              self.custom_tlvs = custom_tlvs;
 +              self
 +      }
 +}
 +
 +pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option<Event> {
 +      let PassAlongPathArgs {
 +              origin_node, expected_path, recv_value, payment_hash: our_payment_hash,
 +              payment_secret: our_payment_secret, event: ev, payment_claimable_expected,
 +              clear_recipient_events, expected_preimage, is_probe, custom_tlvs
 +      } = args;
 +
        let mut payment_event = SendEvent::from_event(ev);
        let mut prev_node = origin_node;
        let mut event = None;
                                                assert_eq!(our_payment_hash, *payment_hash);
                                                assert_eq!(node.node.get_our_node_id(), receiver_node_id.unwrap());
                                                assert!(onion_fields.is_some());
 +                                              assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs);
                                                match &purpose {
                                                        PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                                                assert_eq!(expected_preimage, *payment_preimage);
  }
  
  pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_claimable_expected: bool, expected_preimage: Option<PaymentPreimage>) -> Option<Event> {
 -      do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_claimable_expected, true, expected_preimage, false)
 +      let mut args = PassAlongPathArgs::new(origin_node, expected_path, recv_value, our_payment_hash, ev);
 +      if !payment_claimable_expected {
 +              args = args.without_claimable_event();
 +      }
 +      if let Some(payment_secret) = our_payment_secret {
 +              args = args.with_payment_secret(payment_secret);
 +      }
 +      if let Some(payment_preimage) = expected_preimage {
 +              args = args.with_payment_preimage(payment_preimage);
 +      }
 +      do_pass_along_path(args)
  }
  
  pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]]) {
        for path in expected_route.iter() {
                let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events);
  
 -              do_pass_along_path(origin_node, path, 0, PaymentHash([0_u8; 32]), None, ev, false, false, None, true);
 +              do_pass_along_path(PassAlongPathArgs::new(origin_node, path, 0, PaymentHash([0_u8; 32]), ev)
 +                      .is_probe()
 +                      .without_clearing_recipient_events());
 +
                let nodes_to_fail_payment: Vec<_> = vec![origin_node].into_iter().chain(path.iter().cloned()).collect();
  
                fail_payment_along_path(nodes_to_fail_payment.as_slice());
@@@ -2677,71 -2570,26 +2695,71 @@@ pub fn do_claim_payment_along_route<'a
        origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool,
        our_payment_preimage: PaymentPreimage
  ) -> u64 {
 -      let extra_fees = vec![0; expected_paths.len()];
 -      do_claim_payment_along_route_with_extra_penultimate_hop_fees(origin_node, expected_paths,
 -              &extra_fees[..], skip_last, our_payment_preimage)
 -}
 -
 -pub fn do_claim_payment_along_route_with_extra_penultimate_hop_fees<'a, 'b, 'c>(
 -      origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees:
 -      &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage
 -) -> u64 {
 -      assert_eq!(expected_paths.len(), expected_extra_fees.len());
        for path in expected_paths.iter() {
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage);
 -      pass_claimed_payment_along_route(origin_node, expected_paths, expected_extra_fees, skip_last, our_payment_preimage)
 +      pass_claimed_payment_along_route(
 +              ClaimAlongRouteArgs::new(origin_node, expected_paths, our_payment_preimage)
 +                      .skip_last(skip_last)
 +      )
  }
  
 -pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees: &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage) -> u64 {
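 +/// Arguments to [`pass_claimed_payment_along_route`], with optional expectations configured via
 +/// the builder-style helpers below.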
 +pub struct ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
 +      pub origin_node: &'a Node<'b, 'c, 'd>,
 +      pub expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]],
 +      pub expected_extra_fees: Vec<u32>,
 +      pub expected_min_htlc_overpay: Vec<u32>,
 +      pub skip_last: bool,
 +      pub payment_preimage: PaymentPreimage,
 +      // Allow forwarding nodes to have taken 1 msat more fee than expected based on the downstream
 +      // fulfill amount.
 +      //
 +      // Necessary because our test utils calculate the expected fee for an intermediate node based on
 +      // the amount that was claimed in their downstream peer's fulfill, but blinded intermediate nodes
 +      // calculate their fee based on the inbound amount from their upstream peer, causing a difference
 +      // in rounding.
 +      pub allow_1_msat_fee_overpay: bool,
 +}
 +
 +impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
 +      pub fn new(
 +              origin_node: &'a Node<'b, 'c, 'd>, expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]],
 +              payment_preimage: PaymentPreimage,
 +      ) -> Self {
 +              Self {
 +                      origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()],
 +                      expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage,
 +                      allow_1_msat_fee_overpay: false,
 +              }
 +      }
 +      pub fn skip_last(mut self, skip_last: bool) -> Self {
 +              self.skip_last = skip_last;
 +              self
 +      }
 +      pub fn with_expected_extra_fees(mut self, extra_fees: Vec<u32>) -> Self {
 +              self.expected_extra_fees = extra_fees;
 +              self
 +      }
 +      pub fn with_expected_min_htlc_overpay(mut self, extra_fees: Vec<u32>) -> Self {
 +              self.expected_min_htlc_overpay = extra_fees;
 +              self
 +      }
 +      pub fn allow_1_msat_fee_overpay(mut self) -> Self {
 +              self.allow_1_msat_fee_overpay = true;
 +              self
 +      }
 +}
 +
 +pub fn pass_claimed_payment_along_route<'a, 'b, 'c, 'd>(args: ClaimAlongRouteArgs) -> u64 {
 +      let ClaimAlongRouteArgs {
 +              origin_node, expected_paths, expected_extra_fees, expected_min_htlc_overpay, skip_last,
 +              payment_preimage: our_payment_preimage, allow_1_msat_fee_overpay,
 +      } = args;
        let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
        assert_eq!(claim_event.len(), 1);
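 +      // Tracks the per-hop forwarded amount: set to the claimed amount below, then grown by
 +      // each hop's fee as we walk back towards the payer.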
 +      #[allow(unused)]
 +      let mut fwd_amt_msat = 0;
        match claim_event[0] {
                Event::PaymentClaimed {
                        purpose: PaymentPurpose::SpontaneousPayment(preimage),
                        assert_eq!(htlcs.len(), expected_paths.len());  // One per path.
                        assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
                        expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc));
 +                      fwd_amt_msat = amount_msat;
                },
                Event::PaymentClaimed {
                        purpose: PaymentPurpose::InvoicePayment { .. },
                        assert_eq!(htlcs.len(), expected_paths.len());  // One per path.
                        assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
                        expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc));
 +                      fwd_amt_msat = amount_msat;
                }
                _ => panic!(),
        }
                per_path_msgs.push(msgs_from_ev!(&events[0]));
        } else {
                for expected_path in expected_paths.iter() {
 -                      // For MPP payments, we always want the message to the first node in the path.
 -                      let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
 +                      // For MPP payments, we want the fulfill message from the payee to the penultimate hop in the
 +                      // path.
 +                      let penultimate_hop_node_id = expected_path.iter().rev().skip(1).next()
 +                              .map(|n| n.node.get_our_node_id())
 +                              .unwrap_or(origin_node.node.get_our_node_id());
 +                      let ev = remove_first_msg_event_to_node(&penultimate_hop_node_id, &mut events);
                        per_path_msgs.push(msgs_from_ev!(&ev));
                }
        }
                                {
                                        $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
                                        let mut fee = {
 -                                              let per_peer_state = $node.node.per_peer_state.read().unwrap();
 -                                              let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
 -                                                      .unwrap().lock().unwrap();
 -                                              let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
 -                                              if let Some(prev_config) = channel.context().prev_config() {
 -                                                      prev_config.forwarding_fee_base_msat
 -                                              } else {
 -                                                      channel.context().config().forwarding_fee_base_msat
 -                                              }
 +                                              let (base_fee, prop_fee) = {
 +                                                      let per_peer_state = $node.node.per_peer_state.read().unwrap();
 +                                                      let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
 +                                                              .unwrap().lock().unwrap();
 +                                                      let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
 +                                                      if let Some(prev_config) = channel.context().prev_config() {
 +                                                              (prev_config.forwarding_fee_base_msat as u64,
 +                                                               prev_config.forwarding_fee_proportional_millionths as u64)
 +                                                      } else {
 +                                                              (channel.context().config().forwarding_fee_base_msat as u64,
 +                                                               channel.context().config().forwarding_fee_proportional_millionths as u64)
 +                                                      }
 +                                              };
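 +                                              // Expected forwarding fee: the base fee plus the proportional fee,
 +                                              // in millionths of the forwarded amount.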
 +                                              ((fwd_amt_msat * prop_fee / 1_000_000) + base_fee) as u32
                                        };
 -                                      if $idx == 1 { fee += expected_extra_fees[i]; }
 -                                      expect_payment_forwarded!(*$node, $next_node, $prev_node, Some(fee as u64), false, false);
 -                                      expected_total_fee_msat += fee as u64;
 +
 +                                      let mut expected_extra_fee = None;
 +                                      if $idx == 1 {
 +                                              fee += expected_extra_fees[i];
 +                                              fee += expected_min_htlc_overpay[i];
 +                                              expected_extra_fee = if expected_extra_fees[i] > 0 { Some(expected_extra_fees[i] as u64) } else { None };
 +                                      }
 +                                      let mut events = $node.node.get_and_clear_pending_events();
 +                                      assert_eq!(events.len(), 1);
 +                                      let actual_fee = expect_payment_forwarded(events.pop().unwrap(), *$node, $next_node, $prev_node,
 +                                              Some(fee as u64), expected_extra_fee, false, false, allow_1_msat_fee_overpay);
 +                                      expected_total_fee_msat += actual_fee.unwrap();
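 +                                      // Each upstream hop forwards the downstream amount plus this hop's fee.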
 +                                      fwd_amt_msat += actual_fee.unwrap();
                                        check_added_monitors!($node, 1);
                                        let new_next_msgs = if $new_msgs {
                                                let events = $node.node.get_and_clear_pending_msg_events();
@@@ -3125,7 -2952,7 +3143,7 @@@ pub fn create_node_cfgs_with_persisters
                        tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster,
                        fee_estimator: &chanmon_cfgs[i].fee_estimator,
                        router: test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[i].logger, &chanmon_cfgs[i].scorer),
 -                      message_router: test_utils::TestMessageRouter::new(network_graph.clone()),
 +                      message_router: test_utils::TestMessageRouter::new(network_graph.clone(), &chanmon_cfgs[i].keys_manager),
                        chain_monitor,
                        keys_manager: &chanmon_cfgs[i].keys_manager,
                        node_seed: seed,
@@@ -3230,6 -3057,28 +3248,28 @@@ pub fn create_network<'a, 'b: 'a, 'c: '
        nodes
  }
  
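+ /// Connects a peer with a fixed dummy node id, ensuring the node has at least one connected
+ /// peer (e.g. so that deferred broadcast messages can be drained during tests).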
+ pub fn connect_dummy_node<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>) {
+       let node_id_dummy = PublicKey::from_slice(&[2; 33]).unwrap();
+       let mut dummy_init_features = InitFeatures::empty();
+       dummy_init_features.set_static_remote_key_required();
+       let init_dummy = msgs::Init {
+               features: dummy_init_features,
+               networks: None,
+               remote_network_address: None
+       };
+       node.node.peer_connected(&node_id_dummy, &init_dummy, true).unwrap();
+       node.onion_messenger.peer_connected(&node_id_dummy, &init_dummy, true).unwrap();
+ }
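+ 
+ /// Disconnects the dummy peer previously added by `connect_dummy_node`.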
+ pub fn disconnect_dummy_node<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>) {
+       let node_id_dummy = PublicKey::from_slice(&[2; 33]).unwrap();
+       node.node.peer_disconnected(&node_id_dummy);
+       node.onion_messenger.peer_disconnected(&node_id_dummy);
+ }
  // Note that the following only works for CLTV values up to 128
  pub const ACCEPTED_HTLC_SCRIPT_WEIGHT: usize = 137; // Here we have a diff due to HTLC CLTV expiry being < 2^15 in test
  pub const ACCEPTED_HTLC_SCRIPT_WEIGHT_ANCHORS: usize = 140; // Here we have a diff due to HTLC CLTV expiry being < 2^15 in test
@@@ -3249,7 -3098,7 +3289,7 @@@ pub enum HTLCType { NONE, TIMEOUT, SUCC
  /// also fail.
  pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction>  {
        let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 -      let mut txn_seen = HashSet::new();
 +      let mut txn_seen = new_hash_set();
        node_txn.retain(|tx| txn_seen.insert(tx.txid()));
        assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
  
@@@ -3314,7 -3163,7 +3354,7 @@@ pub fn test_revoked_htlc_claim_txn_broa
  
  pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec<Transaction>) -> Vec<Transaction>  {
        let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 -      let mut txn_seen = HashSet::new();
 +      let mut txn_seen = new_hash_set();
        node_txn.retain(|tx| txn_seen.insert(tx.txid()));
  
        let mut found_prev = false;
  }
  
  pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b, 'c>>, a: usize, b: usize, needs_err_handle: bool, expected_error: &str)  {
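+       // Broadcasts are deferred until at least one peer is connected, so connect a dummy peer
+       // for each node if necessary.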
+       let mut dummy_connected = false;
+       if !is_any_peer_connected(&nodes[a]) {
+               connect_dummy_node(&nodes[a]);
+               dummy_connected = true;
+       }
        let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
        assert_eq!(events_1.len(), 2);
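+       // The error message is now queued ahead of the deferred channel update broadcast.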
-       let as_update = match events_1[0] {
+       let as_update = match events_1[1] {
                MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                        msg.clone()
                },
                _ => panic!("Unexpected event"),
        };
-       match events_1[1] {
+       match events_1[0] {
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[b].node.get_our_node_id());
                        assert_eq!(msg.data, expected_error);
                },
                _ => panic!("Unexpected event"),
        }
+       if dummy_connected {
+               disconnect_dummy_node(&nodes[a]);
+               dummy_connected = false;
+       }
+       if !is_any_peer_connected(&nodes[b]) {
+               connect_dummy_node(&nodes[b]);
+               dummy_connected = true;
+       }
        let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), if needs_err_handle { 1 } else { 2 });
-       let bs_update = match events_2[0] {
+       let bs_update = match events_2.last().unwrap() {
                MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                        msg.clone()
                },
                _ => panic!("Unexpected event"),
        };
        if !needs_err_handle {
-               match events_2[1] {
+               match events_2[0] {
                        MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                                assert_eq!(node_id, nodes[a].node.get_our_node_id());
                                assert_eq!(msg.data, expected_error);
                        _ => panic!("Unexpected event"),
                }
        }
+       if dummy_connected {
+               disconnect_dummy_node(&nodes[b]);
+       }
        for node in nodes {
                node.gossip_sync.handle_channel_update(&as_update).unwrap();
                node.gossip_sync.handle_channel_update(&bs_update).unwrap();
@@@ -3414,7 -3278,7 +3469,7 @@@ macro_rules! get_channel_value_stat 
  macro_rules! get_chan_reestablish_msgs {
        ($src_node: expr, $dst_node: expr) => {
                {
 -                      let mut announcements = $crate::prelude::HashSet::new();
 +                      let mut announcements = $crate::prelude::new_hash_set();
                        let mut res = Vec::with_capacity(1);
                        for msg in $src_node.node.get_and_clear_pending_msg_events() {
                                if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
index 971c10c55f97ba66950365a514e2455f62d92859,c0254e35fd9064711df34d2643963ee5dd7738bc..eee9ef49b60bcd14e5ebd962b7486a8af1b3880f
@@@ -107,22 -107,22 +107,22 @@@ fn test_insane_channel_opens() 
        use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
  
        // Test all mutations that would make the channel open message insane
 -      insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
 -      insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
 +      insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
 +      insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
  
 -      insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
 +      insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });
  
 -      insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
 +      insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
  
 -      insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
 +      insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1; msg });
  
 -      insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
 +      insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
  
 -      insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
 +      insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
  
 -      insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
 +      insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });
  
 -      insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
 +      insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
  }
  
  #[test]
@@@ -166,7 -166,7 +166,7 @@@ fn do_test_counterparty_no_reserve(send
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        if !send_from_initiator {
                open_channel_message.channel_reserve_satoshis = 0;
 -              open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
 +              open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
  
        let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        if send_from_initiator {
                accept_channel_message.channel_reserve_satoshis = 0;
 -              accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
 +              accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
        {
                                chan_context.holder_selected_channel_reserve_satoshis = 0;
                                chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
                        },
 -                      ChannelPhase::Funded(_) => assert!(false),
 +                      _ => assert!(false),
                }
        }
  
@@@ -871,8 -871,8 +871,8 @@@ fn test_update_fee_with_fundee_update_a
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 -      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
  }
  
  #[test]
@@@ -985,8 -985,8 +985,8 @@@ fn test_update_fee() 
        assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 -      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
  }
  
  #[test]
@@@ -1104,17 -1104,17 +1104,17 @@@ fn fake_network_test() 
  
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
 -      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
 -      check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
 -      check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
 -      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
 -      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
  }
  
  #[test]
@@@ -1401,7 -1401,7 +1401,7 @@@ fn test_fee_spike_violation_fails_htlc(
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
  
 -      let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
 +      let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
  
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
@@@ -1599,7 -1599,7 +1599,7 @@@ fn test_chan_reserve_violation_inbound_
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
 -      let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
 +      let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
                700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
@@@ -1778,7 -1778,7 +1778,7 @@@ fn test_chan_reserve_violation_inbound_
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
 -      let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
 +      let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
@@@ -2371,13 -2371,13 +2371,13 @@@ fn channel_monitor_network_test() 
                connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
                let events = nodes[3].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 2);
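+               // With channel update broadcasts deferred, the DisconnectPeer error is queued first
+               // and the update second.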
-               let close_chan_update_1 = match events[0] {
+               let close_chan_update_1 = match events[1] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
-               match events[1] {
+               match events[0] {
                        MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[4].node.get_our_node_id());
                        },
                connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
                let events = nodes[4].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 2);
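+               // Same ordering as above: the error event precedes the update broadcast.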
-               let close_chan_update_2 = match events[0] {
+               let close_chan_update_2 = match events[1] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
-               match events[1] {
+               match events[0] {
                        MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[3].node.get_our_node_id());
                        },
                }
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
 -              check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
 +              check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
  
                mine_transaction(&nodes[4], &node_txn[0]);
                check_preimage_claim(&nodes[4], &node_txn);
  
        assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
                Ok(ChannelMonitorUpdateStatus::Completed));
 -      check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
  }
  
  #[test]
@@@ -2750,7 -2750,7 +2750,7 @@@ fn claim_htlc_outputs_single_tx() 
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                let mut events = nodes[0].node.get_and_clear_pending_events();
 -              expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
 +              expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
                match events.last().unwrap() {
                        Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                        _ => panic!("Unexpected event"),
@@@ -2889,10 -2889,8 +2889,10 @@@ fn test_htlc_on_chain_success() 
        }
        let chan_id = Some(chan_1.2);
        match forwarded_events[1] {
 -              Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
 -                      assert_eq!(fee_earned_msat, Some(1000));
 +              Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
 +                      next_channel_id, outbound_amount_forwarded_msat, ..
 +              } => {
 +                      assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
                _ => panic!()
        }
        match forwarded_events[2] {
 -              Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
 -                      assert_eq!(fee_earned_msat, Some(1000));
 +              Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
 +                      next_channel_id, outbound_amount_forwarded_msat, ..
 +              } => {
 +                      assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@@ -3312,18 -3308,18 +3312,18 @@@ fn do_test_commitment_revoked_fail_back
                let events = nodes[1].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 2);
                match events[0] {
 -                      Event::PendingHTLCsForwardable { .. } => { },
 -                      _ => panic!("Unexpected event"),
 -              };
 -              match events[1] {
                        Event::HTLCHandlingFailed { .. } => { },
                        _ => panic!("Unexpected event"),
                }
 +              match events[1] {
 +                      Event::PendingHTLCsForwardable { .. } => { },
 +                      _ => panic!("Unexpected event"),
 +              };
                // Deliberately don't process the pending fail-back so they all fail back at once after
                // block connection just like the !deliver_bs_raa case
        }
  
 -      let mut failed_htlcs = HashSet::new();
 +      let mut failed_htlcs = new_hash_set();
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
  
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
@@@ -3503,7 -3499,7 +3503,7 @@@ fn fail_backward_pending_htlc_upon_chan
  
                let secp_ctx = Secp256k1::new();
                let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
 -              let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
 +              let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
                let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
                        &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
@@@ -3699,7 -3695,7 +3699,7 @@@ fn test_dup_events_on_peer_disconnect(
  #[test]
  fn test_peer_disconnected_before_funding_broadcasted() {
        // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
 -      // before the funding transaction has been broadcasted.
 +      // before the funding transaction has been broadcasted and the peer does not reconnect in time.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
        }
  
 -      // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
 -      // disconnected before the funding transaction was broadcasted.
 +      // The peers disconnect before the funding transaction is broadcasted.
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
  
 -      check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
 +      // The time for peers to reconnect expires.
 +      for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
 +              nodes[0].node.timer_tick_occurred();
 +      }
 +
 +      // Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
 +      // when the peers are disconnected and do not reconnect before the funding
 +      // transaction is broadcasted.
 +      check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true
                , [nodes[1].node.get_our_node_id()], 1000000);
        check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
                , [nodes[0].node.get_our_node_id()], 1000000);
@@@ -4616,7 -4605,7 +4616,7 @@@ fn test_static_spendable_outputs_preima
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
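+       // The BroadcastChannelUpdate now appears later in the event queue.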
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@@ -4659,7 -4648,7 +4659,7 @@@ fn test_static_spendable_outputs_timeou
        mine_transaction(&nodes[1], &commitment_tx[0]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
-       match events[0] {
+       match events[1] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        }
@@@ -4923,10 -4912,8 +4923,10 @@@ fn test_onchain_to_onchain_claim() 
                _ => panic!("Unexpected event"),
        }
        match events[1] {
 -              Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
 -                      assert_eq!(fee_earned_msat, Some(1000));
 +              Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
 +                      next_channel_id, outbound_amount_forwarded_msat, ..
 +              } => {
 +                      assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, Some(chan_1.2));
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@@ -5075,7 -5062,7 +5075,7 @@@ fn test_duplicate_payment_hash_one_fail
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@@ -5153,7 -5140,7 +5153,7 @@@ fn test_dynamic_spendable_outputs_local
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@@ -5351,7 -5338,7 +5351,7 @@@ fn do_test_fail_backwards_unrevoked_rem
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
        if deliver_last_raa {
 -              expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
 +              expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
  
                let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
                expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
  
        let as_events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
 -      let mut as_failds = HashSet::new();
 +      let mut as_faileds = new_hash_set();
        let mut as_updates = 0;
        for event in as_events.iter() {
                if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
 -                      assert!(as_failds.insert(*payment_hash));
 +                      assert!(as_faileds.insert(*payment_hash));
                        if *payment_hash != payment_hash_2 {
                                assert_eq!(*payment_failed_permanently, deliver_last_raa);
                        } else {
                } else if let &Event::PaymentFailed { .. } = event {
                } else { panic!("Unexpected event"); }
        }
 -      assert!(as_failds.contains(&payment_hash_1));
 -      assert!(as_failds.contains(&payment_hash_2));
 +      assert!(as_faileds.contains(&payment_hash_1));
 +      assert!(as_faileds.contains(&payment_hash_2));
        if announce_latest {
 -              assert!(as_failds.contains(&payment_hash_3));
 -              assert!(as_failds.contains(&payment_hash_5));
 +              assert!(as_faileds.contains(&payment_hash_3));
 +              assert!(as_faileds.contains(&payment_hash_5));
        }
 -      assert!(as_failds.contains(&payment_hash_6));
 +      assert!(as_faileds.contains(&payment_hash_6));
  
        let bs_events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
 -      let mut bs_failds = HashSet::new();
 +      let mut bs_faileds = new_hash_set();
        let mut bs_updates = 0;
        for event in bs_events.iter() {
                if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
 -                      assert!(bs_failds.insert(*payment_hash));
 +                      assert!(bs_faileds.insert(*payment_hash));
                        if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
                                assert_eq!(*payment_failed_permanently, deliver_last_raa);
                        } else {
                } else if let &Event::PaymentFailed { .. } = event {
                } else { panic!("Unexpected event"); }
        }
 -      assert!(bs_failds.contains(&payment_hash_1));
 -      assert!(bs_failds.contains(&payment_hash_2));
 +      assert!(bs_faileds.contains(&payment_hash_1));
 +      assert!(bs_faileds.contains(&payment_hash_2));
        if announce_latest {
 -              assert!(bs_failds.contains(&payment_hash_4));
 +              assert!(bs_faileds.contains(&payment_hash_4));
        }
 -      assert!(bs_failds.contains(&payment_hash_5));
 +      assert!(bs_faileds.contains(&payment_hash_5));
  
        // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
        // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
@@@ -5547,7 -5534,7 +5547,7 @@@ fn test_key_derivation_params() 
        let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
        let scorer = RwLock::new(test_utils::TestScorer::new());
        let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
 -      let message_router = test_utils::TestMessageRouter::new(network_graph.clone());
 +      let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
        let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        node_cfgs.remove(0);
@@@ -5632,7 -5619,7 +5632,7 @@@ fn test_static_output_closing_tx() 
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
  
        mine_transaction(&nodes[0], &closing_tx);
 -      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        check_spends!(spend_txn[0], closing_tx);
  
        mine_transaction(&nodes[1], &closing_tx);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@@ -5682,7 -5669,7 +5682,7 @@@ fn do_htlc_claim_local_commitment_only(
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 -      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
  }
  
  fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 -      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
  }
  
  fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 -              check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 +              check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@@ -5841,26 -5828,26 +5841,26 @@@ fn bolt2_open_channel_sending_node_chec
        let push_msat=10001;
        assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
        let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
 +      assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
  
        // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
        // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1.
 -      assert!(node0_to_1_send_open_channel.channel_flags<=1);
 +      assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
  
        // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
        assert!(BREAKDOWN_TIMEOUT>0);
 -      assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
 +      assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
  
        // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
        let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
 -      assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash);
 +      assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
  
        // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
 -      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
 -      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
 -      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
 -      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
 -      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
 +      assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
  }
  
  #[test]
@@@ -5874,7 -5861,7 +5874,7 @@@ fn bolt2_open_channel_sane_dust_limit(
        let push_msat=10001;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
        let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      node0_to_1_send_open_channel.dust_limit_satoshis = 547;
 +      node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
        node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
  
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
@@@ -6182,7 -6169,7 +6182,7 @@@ fn test_fail_holding_cell_htlc_upon_fre
        // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
        let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(process_htlc_forwards_event.len(), 2);
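 +      // The PendingHTLCsForwardable event is now the second of the two events.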
 -      match &process_htlc_forwards_event[0] {
 +      match &process_htlc_forwards_event[1] {
                &Event::PendingHTLCsForwardable { .. } => {},
                _ => panic!("Unexpected event"),
        }
@@@ -6488,7 -6475,7 +6488,7 @@@ fn test_update_add_htlc_bolt2_receiver_
                get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
        route.paths[0].hops[0].fee_msat = send_amt;
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
 -      let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
 +      let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
@@@ -7214,7 -7201,7 +7214,7 @@@ fn test_user_configurable_csv_delay() 
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 -      open_channel.to_self_delay = 200;
 +      open_channel.common_fields.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 -      accept_channel.to_self_delay = 200;
 +      accept_channel.common_fields.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
        let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 -      open_channel.to_self_delay = 200;
 +      open_channel.common_fields.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
@@@ -7334,6 -7321,9 +7334,9 @@@ fn test_announce_disable_channels() 
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
+       // Connect a dummy node so the channel update broadcasts below are generated;
+       // updates are now only broadcast once we are connected to at least one peer.
+       connect_dummy_node(&nodes[0]);
        create_announced_chan_between_nodes(&nodes, 0, 1);
        create_announced_chan_between_nodes(&nodes, 1, 0);
        create_announced_chan_between_nodes(&nodes, 0, 1);
        }
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
 -      let mut chans_disabled = HashMap::new();
 +      let mut chans_disabled = new_hash_map();
        for e in msg_events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
@@@ -7543,7 -7533,7 +7546,7 @@@ fn test_bump_penalty_txn_on_revoked_htl
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
        let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
                nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
 -      send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
 +      let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
  
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
        connect_block(&nodes[0], &block_129);
        let events = nodes[0].node.get_and_clear_pending_events();
 -      expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
 +      expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
        match events.last().unwrap() {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                _ => panic!("Unexpected event"),
@@@ -7931,8 -7921,8 +7934,8 @@@ fn test_override_channel_config() 
  
        // Assert the channel created by node0 is using the override config.
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      assert_eq!(res.channel_flags, 0);
 -      assert_eq!(res.to_self_delay, 200);
 +      assert_eq!(res.common_fields.channel_flags, 0);
 +      assert_eq!(res.common_fields.to_self_delay, 200);
  }
  
  #[test]
@@@ -7946,11 -7936,11 +7949,11 @@@ fn test_override_0msat_htlc_minimum() 
  
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      assert_eq!(res.htlc_minimum_msat, 1);
 +      assert_eq!(res.common_fields.htlc_minimum_msat, 1);
  
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
        let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 -      assert_eq!(res.htlc_minimum_msat, 1);
 +      assert_eq!(res.common_fields.htlc_minimum_msat, 1);
  }
  
  #[test]
@@@ -8654,7 -8644,7 +8657,7 @@@ fn test_concurrent_monitor_claim() 
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
 -      check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
 +      check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
                [nodes[1].node.get_our_node_id()], 100000);
        watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
@@@ -8962,7 -8952,7 +8965,7 @@@ fn test_duplicate_temporary_channel_id_
  
        // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
        // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
 -      open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
 +      open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
  
        // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
        // `temporary_channel_id` as they are from different peers.
                match &events[0] {
                        MessageSendEvent::SendAcceptChannel { node_id, msg } => {
                                assert_eq!(node_id, &nodes[1].node.get_our_node_id());
 -                              assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
 +                              assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
                        },
                        _ => panic!("Unexpected event"),
                }
                match &events[0] {
                        MessageSendEvent::SendAcceptChannel { node_id, msg } => {
                                assert_eq!(node_id, &nodes[2].node.get_our_node_id());
 -                              assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
 +                              assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
                        },
                        _ => panic!("Unexpected event"),
                }
@@@ -9106,11 -9096,11 +9109,11 @@@ fn test_duplicate_funding_err_in_fundin
  
        nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
        let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      let node_c_temp_chan_id = open_chan_msg.temporary_channel_id;
 -      open_chan_msg.temporary_channel_id = real_channel_id;
 +      let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
 +      open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
        nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
        let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
 -      accept_chan_msg.temporary_channel_id = node_c_temp_chan_id;
 +      accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
        nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
  
        // Now that we have a second channel with the same funding txo, send a bogus funding message
@@@ -9168,7 -9158,7 +9171,7 @@@ fn test_duplicate_chan_id() 
                                // first (valid) and second (invalid) channels are closed, given they both have
                                // the same non-temporary channel_id. However, currently we do not, so we just
                                // move forward with it.
 -                              assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
 +                              assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
                                assert_eq!(node_id, nodes[0].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
        // First try to open a second channel with a temporary channel id equal to the txid-based one.
        // Technically this is allowed by the spec, but we don't support it and there's little reason
        // to. Still, it shouldn't cause any other issues.
 -      open_chan_msg.temporary_channel_id = channel_id;
 +      open_chan_msg.common_fields.temporary_channel_id = channel_id;
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
        {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
                        MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
                                // Technically, at this point, nodes[1] would be justified in thinking both
                                // channels are closed, but currently we do not, so we just move forward with it.
 -                              assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
 +                              assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
                                assert_eq!(node_id, nodes[0].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
 -              match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
 +              match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
                        ChannelPhase::UnfundedOutboundV1(mut chan) => {
                                let logger = test_utils::TestLogger::new();
                                chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
@@@ -9899,10 -9889,10 +9902,10 @@@ fn do_test_max_dust_htlc_exposure(dust_
  
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      open_channel.max_htlc_value_in_flight_msat = 50_000_000;
 -      open_channel.max_accepted_htlcs = 60;
 +      open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
 +      open_channel.common_fields.max_accepted_htlcs = 60;
        if on_holder_tx {
 -              open_channel.dust_limit_satoshis = 546;
 +              open_channel.common_fields.dust_limit_satoshis = 546;
        }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
                (chan.context().get_dust_buffer_feerate(None) as u64,
                chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
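 +      // The thresholds below are the largest values (in msat) at which an HTLC is
 +      // still dust on the holder tx: the dust limit plus the HTLC-timeout (resp.
 +      // HTLC-success) fee at the dust buffer feerate.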
 -      let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
 +      let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
  
 -      let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
 +      let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
        let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
  
        let dust_htlc_on_counterparty_tx: u64 = 4;
@@@ -10085,7 -10075,7 +10088,7 @@@ fn test_non_final_funding_tx() 
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
  
 -      let best_height = nodes[0].node.best_block.read().unwrap().height();
 +      let best_height = nodes[0].node.best_block.read().unwrap().height;
  
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@@ -10130,7 -10120,7 +10133,7 @@@ fn test_non_final_funding_tx_within_hea
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
  
 -      let best_height = nodes[0].node.best_block.read().unwrap().height();
 +      let best_height = nodes[0].node.best_block.read().unwrap().height;
  
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@@ -10519,90 -10509,6 +10522,90 @@@ fn test_remove_expired_inbound_unfunded
        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
  }
  
 +#[test]
 +fn test_channel_close_when_not_timely_accepted() {
 +      // Create network of two nodes
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      // Simulate a peer disconnect mid-handshake:
 +      // the channel is initiated from the nodes[0] side,
 +      // but the nodes disconnect before nodes[1] can send accept_channel.
 +      let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
 +      let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 +      assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 +
 +      // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
 +      assert_eq!(nodes[0].node.list_channels().len(), 1);
 +
 +      // Since the channel was inbound from nodes[1]'s perspective, it should have been dropped immediately.
 +      assert_eq!(nodes[1].node.list_channels().len(), 0);
 +
 +      // Enough time passes for the unfunded channel to exceed its age limit.
 +      for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
 +              nodes[0].node.timer_tick_occurred();
 +      }
 +
 +      // Since we disconnected from the peer and did not reconnect within the allowed time,
 +      // we should have force-closed the channel by now.
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 +      assert_eq!(nodes[0].node.list_channels().len(), 0);
 +
 +      {
 +              // Since the accept_channel message was never received, the channel
 +              // should have been force-closed by nodes[0] by now, and the peer
 +              // removed from per_peer_state.
 +              let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 +              assert_eq!(node_0_per_peer_state.len(), 0);
 +      }
 +}
 +
 +#[test]
 +fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
 +      // Create network of two nodes
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      // Simulate a peer disconnect mid-handshake:
 +      // the channel is initiated from the nodes[0] side,
 +      // but the nodes disconnect before nodes[1] can send accept_channel.
 +      let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
 +      let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 +      assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
 +
 +      nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 +      nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 +
 +      // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
 +      assert_eq!(nodes[0].node.list_channels().len(), 1);
 +
 +      // Since the channel was inbound from nodes[1]'s perspective, it should have been immediately dropped.
 +      assert_eq!(nodes[1].node.list_channels().len(), 0);
 +
 +      // The peers now reconnect
 +      nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
 +              features: nodes[1].node.init_features(), networks: None, remote_network_address: None
 +      }, true).unwrap();
 +      nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
 +              features: nodes[0].node.init_features(), networks: None, remote_network_address: None
 +      }, false).unwrap();
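 +      // The trailing boolean passed to peer_connected indicates whether the
 +      // connection is inbound from that node's perspective.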
 +
 +      // Make sure the SendOpenChannel message is added to nodes[0]'s pending message events.
 +      let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 +      assert_eq!(msg_events.len(), 1);
 +      match &msg_events[0] {
 +              MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
 +              _ => panic!("Unexpected message."),
 +      }
 +}
 +
  fn do_test_multi_post_event_actions(do_reload: bool) {
        // Tests handling multiple post-Event actions at once.
        // There is specific code in ChannelManager to handle channels where multiple post-Event
@@@ -10759,9 -10665,7 +10762,9 @@@ fn test_batch_channel_open() 
  }
  
  #[test]
 -fn test_disconnect_in_funding_batch() {
 +fn test_close_in_funding_batch() {
 +      // This test ensures that if one of the channels
 +      // in the batch closes, the entire batch closes.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        // The transaction should not have been broadcast before all channels are ready.
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
  
 -      // The remaining peer in the batch disconnects.
 -      nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
 -
 -      // The channels in the batch will close immediately.
 +      // Force-close the channel for which we've completed the initial monitor.
        let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
        let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
        let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
 +
 +      nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
 +
 +      // The monitor should now be marked as closed.
 +      check_added_monitors(&nodes[0], 1);
 +      {
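 +              // A force-close produces one final monitor update carrying CLOSED_CHANNEL_UPDATE_ID.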
 +              let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
 +              let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
 +              assert_eq!(monitor_updates_1.len(), 1);
 +              assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
 +      }
 +
 +      let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 +      match msg_events[0] {
 +              MessageSendEvent::HandleError { .. } => (),
 +              _ => panic!("Unexpected message."),
 +      }
 +
 +      // We broadcast the commitment transaction as part of the force-close.
 +      {
 +              let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
 +              assert_eq!(broadcasted_txs.len(), 1);
 +              assert!(broadcasted_txs[0].txid() != tx.txid());
 +              assert_eq!(broadcasted_txs[0].input.len(), 1);
 +              assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
 +      }
 +
 +      // All channels in the batch should close immediately.
        check_closed_events(&nodes[0], &[
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
                },
        ]);
  
 -      // The monitor should become closed.
 -      check_added_monitors(&nodes[0], 1);
 -      {
 -              let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
 -              let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
 -              assert_eq!(monitor_updates_1.len(), 1);
 -              assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
 -      }
 -
 -      // The funding transaction should not have been broadcast, and therefore, we don't need
 -      // to broadcast a force-close transaction for the closed monitor.
 -      assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 -
        // Ensure the channels don't exist anymore.
        assert!(nodes[0].node.list_channels().is_empty());
  }
@@@ -10983,36 -10875,3 +10986,36 @@@ fn test_funding_and_commitment_tx_confi
        do_test_funding_and_commitment_tx_confirm_same_block(false);
        do_test_funding_and_commitment_tx_confirm_same_block(true);
  }
 +
 +#[test]
 +fn test_accept_inbound_channel_errors_queued() {
 +      // For manually accepted inbound channels, test that a close error is correctly
 +      // handled and that the channel is failed back to the initiator.
 +      let mut config0 = test_default_channel_config();
 +      let mut config1 = config0.clone();
 +      config1.channel_handshake_limits.their_to_self_delay = 1000;
 +      config1.manually_accept_inbound_channels = true;
 +      config0.channel_handshake_config.our_to_self_delay = 2000;
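 +      // nodes[0] will ask for a to_self_delay of 2000, above nodes[1]'s
 +      // their_to_self_delay limit of 1000, so the manual accept below should fail.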
 +
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
 +      let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 +
 +      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 +      let events = nodes[1].node.get_and_clear_pending_events();
 +      match events[0] {
 +              Event::OpenChannelRequest { temporary_channel_id, .. } => {
 +                      match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
 +                              Err(APIError::ChannelUnavailable { err: _ }) => (),
 +                              _ => panic!(),
 +                      }
 +              }
 +              _ => panic!("Unexpected event"),
 +      }
 +      assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 +              open_channel_msg.common_fields.temporary_channel_id);
 +}