Merge pull request #2726 from shaavan/issue2712
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 022767e6f613faffd36143dee351a89baf1304c1..16e8ed3ef69503cb9e9ed56172a6749b3747254b 100644
@@ -7,6 +7,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
+use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::blockdata::script::{Script,Builder};
 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
 use bitcoin::util::sighash;
@@ -38,7 +39,7 @@ use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
 use crate::events::ClosureReason;
 use crate::routing::gossip::NodeId;
-use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
+use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use crate::util::logger::Logger;
 use crate::util::errors::APIError;
 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
@@ -47,6 +48,7 @@ use crate::util::scid_utils::scid_from_parts;
 use crate::io;
 use crate::prelude::*;
 use core::{cmp,mem,fmt};
+use core::convert::TryInto;
 use core::ops::Deref;
 #[cfg(any(test, fuzzing, debug_assertions))]
 use crate::sync::Mutex;
@@ -66,6 +68,8 @@ pub struct ChannelValueStat {
 }
 
 pub struct AvailableBalances {
+       /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
+       pub balance_msat: u64,
        /// Total amount available for our counterparty to send to us.
        pub inbound_capacity_msat: u64,
        /// Total amount available for us to send to our counterparty.
@@ -300,9 +304,24 @@ enum ChannelState {
        /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
        /// to drop us, but we store this anyway.
        ShutdownComplete = 4096,
+       /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
+       /// broadcasting of the funding transaction is being held until all channels in the batch
+       /// have received funding_signed and have their monitors persisted.
+       WaitingForBatch = 1 << 13,
 }
-const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
+const BOTH_SIDES_SHUTDOWN_MASK: u32 =
+       ChannelState::LocalShutdownSent as u32 |
+       ChannelState::RemoteShutdownSent as u32;
+const MULTI_STATE_FLAGS: u32 =
+       BOTH_SIDES_SHUTDOWN_MASK |
+       ChannelState::PeerDisconnected as u32 |
+       ChannelState::MonitorUpdateInProgress as u32;
+const STATE_FLAGS: u32 =
+       MULTI_STATE_FLAGS |
+       ChannelState::TheirChannelReady as u32 |
+       ChannelState::OurChannelReady as u32 |
+       ChannelState::AwaitingRemoteRevoke as u32 |
+       ChannelState::WaitingForBatch as u32;
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
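The new STATE_FLAGS constant collects every modifier bit so that ordering comparisons on channel_state look only at the base state. A minimal standalone sketch of why the mask is needed, using illustrative bit values in the spirit of ChannelState rather than the real discriminants:

    const FUNDING_SENT: u32 = 1 << 3;
    const CHANNEL_READY: u32 = 1 << 6;
    const PEER_DISCONNECTED: u32 = 1 << 7;
    // The real constant ORs in all modifier flags; one is enough to show the problem.
    const STATE_FLAGS: u32 = PEER_DISCONNECTED;

    fn main() {
        // A channel that has only reached FundingSent, but whose peer is disconnected.
        let channel_state = FUNDING_SENT | PEER_DISCONNECTED;
        // Comparing the raw value is wrong: the high flag bit makes it look "past" ChannelReady.
        assert!(channel_state >= CHANNEL_READY);
        // Masking the flags out first compares only the base state, as the updated checks do.
        assert!(channel_state & !STATE_FLAGS < CHANNEL_READY);
    }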
 
@@ -515,6 +534,15 @@ pub(super) struct MonitorRestoreUpdates {
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 }
 
+/// The return value of `signer_maybe_unblocked`
+#[allow(unused)]
+pub(super) struct SignerResumeUpdates {
+       pub commitment_update: Option<msgs::CommitmentUpdate>,
+       pub funding_signed: Option<msgs::FundingSigned>,
+       pub funding_created: Option<msgs::FundingCreated>,
+       pub channel_ready: Option<msgs::ChannelReady>,
+}
+
 /// The return value of `channel_reestablish`
 pub(super) struct ReestablishResponses {
        pub channel_ready: Option<msgs::ChannelReady>,
@@ -525,15 +553,17 @@ pub(super) struct ReestablishResponses {
        pub shutdown_msg: Option<msgs::Shutdown>,
 }
 
-/// The return type of `force_shutdown`
-///
-/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
-/// followed by a list of HTLCs to fail back in the form of the (source, payment hash, and this
-/// channel's counterparty_node_id and channel_id).
-pub(crate) type ShutdownResult = (
-       Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
-       Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>
-);
+/// The result of a shutdown that should be handled.
+#[must_use]
+pub(crate) struct ShutdownResult {
+       /// A channel monitor update to apply.
+       pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
+       /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
+       pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
+       /// An unbroadcasted batch funding transaction id. The closure of this channel should be
+       /// propagated to the remainder of the batch.
+       pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
+}
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
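Turning the old tuple into a named, #[must_use] struct lets call sites consume each piece by name. A standalone sketch of a caller, with the real field types (PublicKey, OutPoint, ChannelMonitorUpdate, HTLCSource, PaymentHash, ChannelId, Txid) replaced by simple placeholders:

    struct ShutdownResult {
        monitor_update: Option<(String, String, String)>,
        dropped_outbound_htlcs: Vec<(String, String, String, String)>,
        unbroadcasted_batch_funding_txid: Option<String>,
    }

    fn handle_shutdown(res: ShutdownResult) {
        if let Some((_node_id, _funding_txo, _update)) = res.monitor_update {
            // Apply the channel monitor update for this channel.
        }
        for (_source, _payment_hash, _node_id, _channel_id) in res.dropped_outbound_htlcs {
            // Fail each dropped outbound HTLC backwards.
        }
        if let Some(_txid) = res.unbroadcasted_batch_funding_txid {
            // Propagate the closure to the other channels funded by the same batch transaction.
        }
    }

    fn main() {
        handle_shutdown(ShutdownResult {
            monitor_update: None,
            dropped_outbound_htlcs: Vec::new(),
            unbroadcasted_batch_funding_txid: None,
        });
    }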
@@ -605,6 +635,35 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
 });
 
+/// The `ChannelPhase` enum describes the current phase in the life of a lightning channel, with
+/// each of its variants containing an appropriate channel struct.
+pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
+       UnfundedOutboundV1(OutboundV1Channel<SP>),
+       UnfundedInboundV1(InboundV1Channel<SP>),
+       Funded(Channel<SP>),
+}
+
+impl<'a, SP: Deref> ChannelPhase<SP> where
+       SP::Target: SignerProvider,
+       <SP::Target as SignerProvider>::Signer: ChannelSigner,
+{
+       pub fn context(&'a self) -> &'a ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(chan) => &chan.context,
+                       ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
+                       ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+               }
+       }
+
+       pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+               }
+       }
+}
+
 /// Contains all state common to unfunded inbound/outbound channels.
 pub(super) struct UnfundedChannelContext {
        /// A counter tracking how many ticks have elapsed since this unfunded channel was
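ChannelPhase lets the ChannelManager keep funded and unfunded channels in a single map while still reaching the shared ChannelContext through one accessor. A standalone sketch of the pattern, with toy types standing in for the real channel structs:

    struct Context { channel_id: u64 }
    struct Outbound { context: Context }
    struct Inbound { context: Context }
    struct Funded { context: Context }

    enum Phase {
        UnfundedOutboundV1(Outbound),
        UnfundedInboundV1(Inbound),
        Funded(Funded),
    }

    impl Phase {
        fn context(&self) -> &Context {
            match self {
                Phase::UnfundedOutboundV1(chan) => &chan.context,
                Phase::UnfundedInboundV1(chan) => &chan.context,
                Phase::Funded(chan) => &chan.context,
            }
        }
    }

    fn main() {
        let channels = vec![
            Phase::UnfundedOutboundV1(Outbound { context: Context { channel_id: 1 } }),
            Phase::UnfundedInboundV1(Inbound { context: Context { channel_id: 2 } }),
            Phase::Funded(Funded { context: Context { channel_id: 3 } }),
        ];
        // Phase-agnostic code can read common state without matching on the variant itself.
        for chan in &channels {
            println!("channel {}", chan.context().channel_id);
        }
    }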
@@ -674,7 +733,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
        cur_holder_commitment_transaction_number: u64,
        cur_counterparty_commitment_transaction_number: u64,
-       value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
+       value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
        pending_inbound_htlcs: Vec<InboundHTLCOutput>,
        pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
        holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
@@ -700,6 +759,18 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        monitor_pending_finalized_fulfills: Vec<HTLCSource>,
 
+       /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
+       /// but our signer (initially) refused to give us a signature, we should retry at some point in
+       /// the future when the signer indicates it may have a signature for us.
+       ///
+       /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
+       /// setting it again as a side-effect of [`Channel::channel_reestablish`].
+       signer_pending_commitment_update: bool,
+       /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
+       /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
+       /// outbound or inbound.
+       signer_pending_funding: bool,
+
        // pending_update_fee is filled when sending and receiving update_fee.
        //
        // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
@@ -792,6 +863,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
        pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
        funding_transaction: Option<Transaction>,
+       is_batch_funding: Option<()>,
 
        counterparty_cur_commitment_point: Option<PublicKey>,
        counterparty_prev_commitment_point: Option<PublicKey>,
@@ -916,7 +988,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns true if we've ever received a message from the remote end for this Channel
        pub fn have_received_message(&self) -> bool {
-               self.channel_state > (ChannelState::OurInitSent as u32)
+               self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
        }
 
        /// Returns true if this channel is fully established and not known to be closing.
@@ -1006,6 +1078,12 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                self.outbound_scid_alias
        }
 
+       /// Returns the holder signer for this channel.
+       #[cfg(test)]
+       pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
+               return &self.holder_signer
+       }
+
        /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
        /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
        /// or prior to any channel actions during `Channel` initialization.
@@ -1118,8 +1196,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                match self.config.options.max_dust_htlc_exposure {
                        MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
                                let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
-                                       ConfirmationTarget::HighPriority);
-                               feerate_per_kw as u64 * multiplier
+                                       ConfirmationTarget::OnChainSweep) as u64;
+                               feerate_per_kw.saturating_mul(multiplier)
                        },
                        MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
                }
@@ -1132,7 +1210,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        // Checks whether we should emit a `ChannelPending` event.
        pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
-               self.is_funding_initiated() && !self.channel_pending_event_emitted
+               self.is_funding_broadcast() && !self.channel_pending_event_emitted
        }
 
        // Returns whether we already emitted a `ChannelPending` event.
@@ -1191,9 +1269,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                did_channel_update
        }
 
-       /// Returns true if funding_created was sent/received.
-       pub fn is_funding_initiated(&self) -> bool {
-               self.channel_state >= ChannelState::FundingSent as u32
+       /// Returns true if funding_signed was sent/received and the
+       /// funding transaction has been broadcast if necessary.
+       pub fn is_funding_broadcast(&self) -> bool {
+               self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
+                       self.channel_state & ChannelState::WaitingForBatch as u32 == 0
        }
 
        /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
@@ -1605,6 +1685,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let inbound_stats = context.get_inbound_pending_htlc_stats(None);
                let outbound_stats = context.get_outbound_pending_htlc_stats(None);
 
+               let mut balance_msat = context.value_to_self_msat;
+               for ref htlc in context.pending_inbound_htlcs.iter() {
+                       if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
+                               balance_msat += htlc.amount_msat;
+                       }
+               }
+               balance_msat -= outbound_stats.pending_htlcs_value_msat;
+
                let outbound_capacity_msat = context.value_to_self_msat
                                .saturating_sub(outbound_stats.pending_htlcs_value_msat)
                                .saturating_sub(
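balance_msat starts from value_to_self_msat, adds back inbound HTLCs we have already claimed (LocalRemoved with a Fulfill) but which have not yet settled into value_to_self_msat, and subtracts every pending outbound HTLC. A worked example with made-up amounts:

    fn main() {
        let value_to_self_msat: u64 = 1_000_000;
        // One inbound HTLC of 50_000 msat we have fulfilled but not yet settled.
        let fulfilled_inbound_msat: u64 = 50_000;
        // Pending outbound HTLCs are still in flight, so they are not counted as ours.
        let pending_outbound_msat: u64 = 200_000;
        let balance_msat = value_to_self_msat + fulfilled_inbound_msat - pending_outbound_msat;
        assert_eq!(balance_msat, 850_000);
    }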
@@ -1612,6 +1700,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                let mut available_capacity_msat = outbound_capacity_msat;
 
+               let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+               } else {
+                       0
+               };
                if context.is_outbound() {
                        // We should mind channel commit tx fee when computing how much of the available capacity
                        // can be used in the next htlc. Mirrors the logic in send_htlc.
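On zero-fee-HTLC anchor channels the funder also pays for the two anchor outputs on the commitment transaction, so their value is carved out of the spendable capacity. Assuming the 330-sat anchor output value defined elsewhere in the crate (not shown in this diff), that works out to:

    fn main() {
        // Assumed value of ANCHOR_OUTPUT_VALUE_SATOSHI; the real constant lives outside this diff.
        const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
        let anchor_outputs_value_msat = ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000;
        assert_eq!(anchor_outputs_value_msat, 660_000);
    }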
@@ -1626,14 +1719,19 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        }
 
                        let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
-                       let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
+                       let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
                        let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
-                       let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
+                       let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
+                       if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                               max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+                               min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+                       }
 
                        // We will first subtract the fee as if we were above-dust. Then, if the resulting
                        // value ends up being below dust, we have this fee available again. In that case,
                        // match the value to right-below-dust.
-                       let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
+                       let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
+                               max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
                        if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
                                let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
                                debug_assert!(one_htlc_difference_msat != 0);
@@ -1658,7 +1756,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
                                .saturating_sub(inbound_stats.pending_htlcs_value_msat);
 
-                       if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
+                       if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
                                // If another HTLC's fee would reduce the remote's balance below the reserve limit
                                // we've selected for them, we can only send dust HTLCs.
                                available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
@@ -1683,14 +1781,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                         context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
                };
                let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
+               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
                        remaining_msat_below_dust_exposure_limit =
                                Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
                }
 
                let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
+               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
                        remaining_msat_below_dust_exposure_limit = Some(cmp::min(
                                remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
                                max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
@@ -1721,6 +1819,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        outbound_capacity_msat,
                        next_outbound_htlc_limit_msat: available_capacity_msat,
                        next_outbound_htlc_minimum_msat,
+                       balance_msat,
                }
        }
 
@@ -1923,15 +2022,41 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                res
        }
 
-       /// Returns transaction if there is pending funding transaction that is yet to broadcast
-       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
-               if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
-                       self.funding_transaction.clone()
+       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
+               where F: Fn() -> Option<O> {
+               if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
+                  self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
+                       f()
                } else {
                        None
                }
        }
 
+       /// Returns the transaction if there is a pending funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+               self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
+       }
+
+       /// Returns the transaction ID if there is a pending funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
+               self.if_unbroadcasted_funding(||
+                       self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
+               )
+       }
+
+       /// Returns whether the channel is funded in a batch.
+       pub fn is_batch_funding(&self) -> bool {
+               self.is_batch_funding.is_some()
+       }
+
+       /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
+               self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
+       }
+
        /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
        /// shutdown of this channel - no more calls into this Channel may be made afterwards except
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
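unbroadcasted_batch_funding_txid only reports a txid while the funding transaction is still unbroadcast and the channel is flagged as batch-funded; after set_batch_ready, or for a non-batch channel, it returns None. A standalone sketch of the Option::filter logic, with a plain integer standing in for Txid:

    fn unbroadcasted_batch_funding_txid(unbroadcasted_txid: Option<u64>, is_batch_funding: bool) -> Option<u64> {
        // Mirrors `self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())`.
        unbroadcasted_txid.filter(|_| is_batch_funding)
    }

    fn main() {
        assert_eq!(unbroadcasted_batch_funding_txid(Some(42), true), Some(42));
        assert_eq!(unbroadcasted_batch_funding_txid(Some(42), false), None);
        assert_eq!(unbroadcasted_batch_funding_txid(None, true), None);
    }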
@@ -1972,10 +2097,80 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                }))
                        } else { None }
                } else { None };
+               let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
 
                self.channel_state = ChannelState::ShutdownComplete as u32;
                self.update_time_counter += 1;
-               (monitor_update, dropped_outbound_htlcs)
+               ShutdownResult {
+                       monitor_update,
+                       dropped_outbound_htlcs,
+                       unbroadcasted_batch_funding_txid,
+               }
+       }
+
+       /// Only allowed after [`Self::channel_transaction_parameters`] is set.
+       fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+               let counterparty_keys = self.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+               let signature = match &self.holder_signer {
+                       // TODO (taproot|arik): move match into calling method for Taproot
+                       ChannelSignerType::Ecdsa(ecdsa) => {
+                               ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
+                                       .map(|(sig, _)| sig).ok()?
+                       }
+               };
+
+               if self.signer_pending_funding {
+                       log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
+                       self.signer_pending_funding = false;
+               }
+
+               Some(msgs::FundingCreated {
+                       temporary_channel_id: self.temporary_channel_id.unwrap(),
+                       funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
+                       funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
+                       signature,
+                       #[cfg(taproot)]
+                       partial_signature_with_nonce: None,
+                       #[cfg(taproot)]
+                       next_local_nonce: None,
+               })
+       }
+
+       /// Only allowed after [`Self::channel_transaction_parameters`] is set.
+       fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
+               let counterparty_keys = self.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
+
+               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+                       &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+               match &self.holder_signer {
+                       // TODO (arik): move match into calling method for Taproot
+                       ChannelSignerType::Ecdsa(ecdsa) => {
+                               let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
+                                       .map(|(signature, _)| msgs::FundingSigned {
+                                               channel_id: self.channel_id(),
+                                               signature,
+                                               #[cfg(taproot)]
+                                               partial_signature_with_nonce: None,
+                                       })
+                                       .ok();
+
+                               if funding_signed.is_none() {
+                                       log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+                                       self.signer_pending_funding = true;
+                               } else if self.signer_pending_funding {
+                                       log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
+                                       self.signer_pending_funding = false;
+                               }
+
+                               // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
+                               (counterparty_initial_commitment_tx, funding_signed)
+                       }
+               }
        }
 }
 
@@ -2030,17 +2225,12 @@ fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_feature
 
 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
 // Note that num_htlcs should not include dust HTLCs.
-fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
+pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
        // Note that we need to divide before multiplying to round properly,
        // since the lowest denomination of bitcoin on-chain is the satoshi.
        (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
 }
 
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_funding_signed on an
-// inbound channel.
-//
 // Holder designates channel data owned for the benefit of the user client.
 // Counterparty designates channel data owned by the another channel participant entity.
 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
@@ -2072,28 +2262,20 @@ impl<SP: Deref> Channel<SP> where
                // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
                // zero fee, so their fee is no longer considered to determine dust limits.
                if !channel_type.supports_anchors_zero_fee_htlc_tx() {
-                       let upper_limit = cmp::max(250 * 25,
-                               fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
+                       let upper_limit =
+                               fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64;
                        if feerate_per_kw as u64 > upper_limit {
                                return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
                        }
                }
 
-               // We can afford to use a lower bound with anchors than previously since we can now bump
-               // fees when broadcasting our commitment. However, we must still make sure we meet the
-               // minimum mempool feerate, until package relay is deployed, such that we can ensure the
-               // commitment transaction propagates throughout node mempools on its own.
                let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
-                       ConfirmationTarget::MempoolMinimum
+                       ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
                } else {
-                       ConfirmationTarget::Background
+                       ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
                };
                let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
-               // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
-               // occasional issues with feerate disagreements between an initiator that wants a feerate
-               // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
-               // sat/kw before the comparison here.
-               if feerate_per_kw + 250 < lower_limit {
+               if feerate_per_kw < lower_limit {
                        if let Some(cur_feerate) = cur_feerate_per_kw {
                                if feerate_per_kw > cur_feerate {
                                        log_warn!(logger,
@@ -2102,7 +2284,7 @@ impl<SP: Deref> Channel<SP> where
                                        return Ok(());
                                }
                        }
-                       return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
+                       return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
                }
                Ok(())
        }
@@ -2550,7 +2732,11 @@ impl<SP: Deref> Channel<SP> where
                        counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 
                assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail update!
-               self.context.channel_state = ChannelState::FundingSent as u32;
+               if self.context.is_batch_funding() {
+                       self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
+               } else {
+                       self.context.channel_state = ChannelState::FundingSent as u32;
+               }
                self.context.cur_holder_commitment_transaction_number -= 1;
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
@@ -2561,11 +2747,20 @@ impl<SP: Deref> Channel<SP> where
                Ok(channel_monitor)
        }
 
+       /// Updates the state of the channel to indicate that all channels in the batch have received
+       /// funding_signed and persisted their monitors.
+       /// The funding transaction is consequently allowed to be broadcast, and the channel can be
+       /// treated as a non-batch channel going forward.
+       pub fn set_batch_ready(&mut self) {
+               self.context.is_batch_funding = None;
+               self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
+       }
+
        /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
        /// and the channel is now usable (and public), this may generate an announcement_signatures to
        /// reply with.
        pub fn channel_ready<NS: Deref, L: Deref>(
-               &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
+               &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
                user_config: &UserConfig, best_block: &BestBlock, logger: &L
        ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
        where
@@ -2588,7 +2783,13 @@ impl<SP: Deref> Channel<SP> where
 
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
 
-               if non_shutdown_state == ChannelState::FundingSent as u32 {
+               // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
+               // batch, but we can receive channel_ready messages.
+               debug_assert!(
+                       non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
+                       non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
+               );
+               if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
                        self.context.channel_state |= ChannelState::TheirChannelReady as u32;
                } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
                        self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
@@ -2630,7 +2831,7 @@ impl<SP: Deref> Channel<SP> where
 
                log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
 
-               Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
+               Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
        }
 
        pub fn update_add_htlc<F, FE: Deref, L: Deref>(
@@ -2671,6 +2872,7 @@ impl<SP: Deref> Channel<SP> where
                if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
                        return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
                }
+
                // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
                // the reserve_satoshis we told them to always have as direct payment so that they lose
                // something if we punish them for broadcasting an old state).
@@ -2730,30 +2932,40 @@ impl<SP: Deref> Channel<SP> where
 
                // Check that the remote can afford to pay for this HTLC on-chain at the current
                // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
-               let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
-                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-                       self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
-               };
-               if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
-               };
-
-               if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
-                       return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+               {
+                       let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
+                               let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+                               self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
+                       };
+                       let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                               ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+                       } else {
+                               0
+                       };
+                       if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
+                               return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
+                       };
+                       if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
+                               return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+                       }
                }
 
+               let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+               } else {
+                       0
+               };
                if !self.context.is_outbound() {
-                       // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
-                       // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
-                       // receiver's side, only on the sender's.
-                       // Note that when we eventually remove support for fee updates and switch to anchor output
-                       // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
-                       // the extra htlc when calculating the next remote commitment transaction fee as we should
-                       // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
-                       // sensitive to fee spikes.
+                       // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+                       // the spec because the fee spike buffer requirement doesn't exist on the receiver's
+                       // side, only on the sender's. Note that with anchor outputs we are no longer as
+                       // sensitive to fee spikes, so we need to account for them less.
                        let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-                       let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
-                       if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
+                       let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+                       if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                               remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+                       }
+                       if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
                                // Note that if the pending_forward_status is not updated here, then it's because we're already failing
                                // the HTLC, i.e. its status is already set to failing.
                                log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
@@ -2763,7 +2975,7 @@ impl<SP: Deref> Channel<SP> where
                        // Check that they won't violate our local required channel reserve by adding this HTLC.
                        let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
                        let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
-                       if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
+                       if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
                                return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
                        }
                }
@@ -3047,8 +3259,8 @@ impl<SP: Deref> Channel<SP> where
                        self.context.monitor_pending_revoke_and_ack = true;
                        if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
                                // If we were going to send a commitment_signed after the RAA, go ahead and do all
-                               // the corresponding HTLC status updates so that get_last_commitment_update
-                               // includes the right HTLCs.
+                               // the corresponding HTLC status updates so that
+                               // get_last_commitment_update_for_send includes the right HTLCs.
                                self.context.monitor_pending_commitment_signed = true;
                                let mut additional_update = self.build_commitment_no_status_check(logger);
                                // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
@@ -3087,7 +3299,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
+               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
                   (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
@@ -3422,9 +3634,10 @@ impl<SP: Deref> Channel<SP> where
                        // cells) while we can't update the monitor, so we just return what we have.
                        if require_commitment {
                                self.context.monitor_pending_commitment_signed = true;
-                               // When the monitor updating is restored we'll call get_last_commitment_update(),
-                               // which does not update state, but we're definitely now awaiting a remote revoke
-                               // before we can step forward any more, so set it here.
+                               // When the monitor updating is restored we'll call
+                               // get_last_commitment_update_for_send(), which does not update state, but we're
+                               // definitely now awaiting a remote revoke before we can step forward any more, so
+                               // set it here.
                                let mut additional_update = self.build_commitment_no_status_check(logger);
                                // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
@@ -3561,17 +3774,17 @@ impl<SP: Deref> Channel<SP> where
        /// resent.
        /// No further message handling calls may be made until a channel_reestablish dance has
        /// completed.
-       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L)  where L::Target: Logger {
+       /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
+       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
                assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
-                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
-                       return;
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+                       return Err(());
                }
 
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
                        // While the below code should be idempotent, it's simpler to just return early, as
                        // redundant disconnect events can fire, though they should be rare.
-                       return;
+                       return Ok(());
                }
 
                if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
@@ -3632,6 +3845,7 @@ impl<SP: Deref> Channel<SP> where
 
                self.context.channel_state |= ChannelState::PeerDisconnected as u32;
                log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
+               Ok(())
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
@@ -3663,7 +3877,7 @@ impl<SP: Deref> Channel<SP> where
        /// successfully and we should restore normal operation. Returns messages which should be sent
        /// to the remote side.
        pub fn monitor_updating_restored<L: Deref, NS: Deref>(
-               &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
+               &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
                user_config: &UserConfig, best_block_height: u32
        ) -> MonitorRestoreUpdates
        where
@@ -3677,12 +3891,12 @@ impl<SP: Deref> Channel<SP> where
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
                // first received the funding_signed.
                let mut funding_broadcastable =
-                       if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+                       if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
                                self.context.funding_transaction.take()
                        } else { None };
                // That said, if the funding transaction is already confirmed (ie we're active with a
                // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
-               if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
                        funding_broadcastable = None;
                }
 
@@ -3704,7 +3918,7 @@ impl<SP: Deref> Channel<SP> where
                        })
                } else { None };
 
-               let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
+               let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
 
                let mut accepted_htlcs = Vec::new();
                mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
@@ -3726,9 +3940,11 @@ impl<SP: Deref> Channel<SP> where
                        Some(self.get_last_revoke_and_ack())
                } else { None };
                let commitment_update = if self.context.monitor_pending_commitment_signed {
-                       self.mark_awaiting_response();
-                       Some(self.get_last_commitment_update(logger))
+                       self.get_last_commitment_update_for_send(logger).ok()
                } else { None };
+               if commitment_update.is_some() {
+                       self.mark_awaiting_response();
+               }
 
                self.context.monitor_pending_revoke_and_ack = false;
                self.context.monitor_pending_commitment_signed = false;
@@ -3777,6 +3993,37 @@ impl<SP: Deref> Channel<SP> where
                Ok(())
        }
 
+       /// Indicates that the signer may have some signatures for us, so we should retry if we're
+       /// blocked.
+       #[allow(unused)]
+       pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
+               let commitment_update = if self.context.signer_pending_commitment_update {
+                       self.get_last_commitment_update_for_send(logger).ok()
+               } else { None };
+               let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
+                       self.context.get_funding_signed_msg(logger).1
+               } else { None };
+               let channel_ready = if funding_signed.is_some() {
+                       self.check_get_channel_ready(0)
+               } else { None };
+               let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
+                       self.context.get_funding_created_msg(logger)
+               } else { None };
+
+               log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
+                       if commitment_update.is_some() { "a" } else { "no" },
+                       if funding_signed.is_some() { "a" } else { "no" },
+                       if funding_created.is_some() { "a" } else { "no" },
+                       if channel_ready.is_some() { "a" } else { "no" });
+
+               SignerResumeUpdates {
+                       commitment_update,
+                       funding_signed,
+                       funding_created,
+                       channel_ready,
+               }
+       }
+
        fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
                let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
                let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
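When an asynchronous signer later signals that signatures may be available, the caller invokes signer_maybe_unblocked and sends whatever it returns. A standalone sketch of consuming SignerResumeUpdates, with the real message types replaced by strings:

    struct SignerResumeUpdates {
        commitment_update: Option<String>,
        funding_signed: Option<String>,
        funding_created: Option<String>,
        channel_ready: Option<String>,
    }

    fn send_pending_messages(updates: SignerResumeUpdates) {
        // In the real flow funding_created/funding_signed precede channel_ready, and a
        // commitment_update only applies to an already-funded channel.
        let queue = [updates.funding_created, updates.funding_signed, updates.channel_ready, updates.commitment_update];
        for msg in queue.into_iter().flatten() {
            println!("enqueue {msg} for the peer");
        }
    }

    fn main() {
        send_pending_messages(SignerResumeUpdates {
            commitment_update: None,
            funding_signed: Some("funding_signed".to_owned()),
            funding_created: None,
            channel_ready: Some("channel_ready".to_owned()),
        });
    }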
@@ -3789,7 +4036,8 @@ impl<SP: Deref> Channel<SP> where
                }
        }
 
-       fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
+       /// Gets the last commitment update for immediate sending to our peer.
+       fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
                let mut update_add_htlcs = Vec::new();
                let mut update_fulfill_htlcs = Vec::new();
                let mut update_fail_htlcs = Vec::new();
@@ -3845,13 +4093,26 @@ impl<SP: Deref> Channel<SP> where
                        })
                } else { None };
 
-               log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+               log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
                                &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
-               msgs::CommitmentUpdate {
+               let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
+                       if self.context.signer_pending_commitment_update {
+                               log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
+                               self.context.signer_pending_commitment_update = false;
+                       }
+                       update
+               } else {
+                       if !self.context.signer_pending_commitment_update {
+                               log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+                               self.context.signer_pending_commitment_update = true;
+                       }
+                       return Err(());
+               };
+               Ok(msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
-                       commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
-               }
+                       commitment_signed,
+               })
        }
 
        /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
@@ -3874,7 +4135,7 @@ impl<SP: Deref> Channel<SP> where
        /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
        pub fn channel_reestablish<L: Deref, NS: Deref>(
                &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
-               genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
+               chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
        ) -> Result<ReestablishResponses, ChannelError>
        where
                L::Target: Logger,
@@ -3889,7 +4150,7 @@ impl<SP: Deref> Channel<SP> where
 
                if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
                        msg.next_local_commitment_number == 0 {
-                       return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
+                       return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
                }
 
                if msg.next_remote_commitment_number > 0 {
@@ -3933,7 +4194,7 @@ impl<SP: Deref> Channel<SP> where
 
                let shutdown_msg = self.get_outbound_shutdown();
 
-               let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
+               let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
 
                if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
                        // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
@@ -4031,7 +4292,7 @@ impl<SP: Deref> Channel<SP> where
                                Ok(ReestablishResponses {
                                        channel_ready, shutdown_msg, announcement_sigs,
                                        raa: required_revoke,
-                                       commitment_update: Some(self.get_last_commitment_update(logger)),
+                                       commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
                                        order: self.context.resend_order.clone(),
                                })
                        }
@@ -4052,8 +4313,10 @@ impl<SP: Deref> Channel<SP> where
                // Propose a range from our current Background feerate to our Normal feerate plus our
                // force_close_avoidance_max_fee_satoshis.
                // If we fail to come to consensus, we'll have to force-close.
-               let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
-               let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+               let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
+               // Use NonAnchorChannelFee because this should be an estimate for a channel close
+               // that we don't expect to need fee bumping
+               let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
                let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
 
                // The spec requires that (when the channel does not have anchors) we only send absolute
@@ -4115,18 +4378,18 @@ impl<SP: Deref> Channel<SP> where
 
        pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
                &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
-               -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+               -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
                where F::Target: FeeEstimator, L::Target: Logger
        {
                if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
-                       return Ok((None, None));
+                       return Ok((None, None, None));
                }
 
                if !self.context.is_outbound() {
                        if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
                                return self.closing_signed(fee_estimator, &msg);
                        }
-                       return Ok((None, None));
+                       return Ok((None, None, None));
                }
 
                let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
@@ -4151,7 +4414,7 @@ impl<SP: Deref> Channel<SP> where
                                                min_fee_satoshis: our_min_fee,
                                                max_fee_satoshis: our_max_fee,
                                        }),
-                               }), None))
+                               }), None, None))
                        }
                }
        }
@@ -4185,7 +4448,7 @@ impl<SP: Deref> Channel<SP> where
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
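The `& !STATE_FLAGS` masking seen here (and in several hunks below) exists because `channel_state` packs an ordered progression of states together with independent flag bits such as `WaitingForBatch` (1 << 13); an un-masked numeric comparison would be thrown off by any set flag. A small self-contained illustration, with illustrative ordinal values:

// Flag bits must be cleared before comparing the ordinal part of the state.
const FUNDING_SENT: u32 = 8;            // illustrative ordinal value
const CHANNEL_READY: u32 = 64;          // illustrative ordinal value
const WAITING_FOR_BATCH: u32 = 1 << 13; // a pure flag bit
const STATE_FLAGS: u32 = WAITING_FOR_BATCH; // plus the other flag bits in the real code

fn main() {
    let state = FUNDING_SENT | WAITING_FOR_BATCH;
    // Unmasked: the flag bit alone pushes the value past CHANNEL_READY.
    assert!(state >= CHANNEL_READY);
    // Masked, as in the diff above: only the ordinal part is compared.
    assert!(state & !STATE_FLAGS < CHANNEL_READY);
}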
@@ -4300,7 +4563,7 @@ impl<SP: Deref> Channel<SP> where
 
        pub fn closing_signed<F: Deref>(
                &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
-               -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+               -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
                where F::Target: FeeEstimator
        {
                if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
@@ -4322,7 +4585,7 @@ impl<SP: Deref> Channel<SP> where
 
                if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
                        self.context.pending_counterparty_closing_signed = Some(msg.clone());
-                       return Ok((None, None));
+                       return Ok((None, None, None));
                }
 
                let funding_redeemscript = self.context.get_funding_redeemscript();
@@ -4352,10 +4615,15 @@ impl<SP: Deref> Channel<SP> where
                assert!(self.context.shutdown_scriptpubkey.is_some());
                if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
+                               let shutdown_result = ShutdownResult {
+                                       monitor_update: None,
+                                       dropped_outbound_htlcs: Vec::new(),
+                                       unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                               };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
                                self.context.channel_state = ChannelState::ShutdownComplete as u32;
                                self.context.update_time_counter += 1;
-                               return Ok((None, Some(tx)));
+                               return Ok((None, Some(tx), Some(shutdown_result)));
                        }
                }
 
@@ -4374,13 +4642,19 @@ impl<SP: Deref> Channel<SP> where
                                                let sig = ecdsa
                                                        .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
                                                        .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
-
-                                               let signed_tx = if $new_fee == msg.fee_satoshis {
+                                               let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
+                                                       let shutdown_result = ShutdownResult {
+                                                               monitor_update: None,
+                                                               dropped_outbound_htlcs: Vec::new(),
+                                                               unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                                                       };
                                                        self.context.channel_state = ChannelState::ShutdownComplete as u32;
                                                        self.context.update_time_counter += 1;
                                                        let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
-                                                       Some(tx)
-                                               } else { None };
+                                                       (Some(tx), Some(shutdown_result))
+                                               } else {
+                                                       (None, None)
+                                               };
 
                                                self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
                                                Ok((Some(msgs::ClosingSigned {
@@ -4391,7 +4665,7 @@ impl<SP: Deref> Channel<SP> where
                                                                min_fee_satoshis: our_min_fee,
                                                                max_fee_satoshis: our_max_fee,
                                                        }),
-                                               }), signed_tx))
+                                               }), signed_tx, shutdown_result))
                                        }
                                }
                        }
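With these changes, `closing_signed` and `maybe_propose_closing_signed` return a third element carrying a `ShutdownResult` once the fee negotiation converges. A hypothetical caller-side sketch, using stand-in types rather than the real message structs:

// Stand-in types; the real caller lives in ChannelManager.
struct ClosingSigned;
struct Transaction;
struct ShutdownResult;

fn handle(res: (Option<ClosingSigned>, Option<Transaction>, Option<ShutdownResult>)) {
    let (reply, signed_tx, shutdown_result) = res;
    if reply.is_some() {
        // queue the counter-proposal to send back to the peer
    }
    if signed_tx.is_some() {
        // broadcast the fully-signed closing transaction
    }
    if shutdown_result.is_some() {
        // negotiation finished: finalize shutdown bookkeeping for this channel
    }
}

fn main() { handle((None, None, None)); }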
@@ -4579,7 +4853,7 @@ impl<SP: Deref> Channel<SP> where
        pub fn is_awaiting_initial_mon_persist(&self) -> bool {
                if !self.is_awaiting_monitor_update() { return false; }
                if self.context.channel_state &
-                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
+                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
                                == ChannelState::FundingSent as u32 {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
                        // FundingSent set, though our peer could have sent their channel_ready.
@@ -4610,7 +4884,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Returns true if our channel_ready has been sent
        pub fn is_our_channel_ready(&self) -> bool {
-               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32
+               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
        }
 
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
@@ -4659,6 +4933,14 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
+               // If we're still awaiting the signature on the funding transaction, we're not ready to
+               // send a channel_ready yet.
+               if self.context.signer_pending_funding {
+                       return None;
+               }
+
+               // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
+               // channel_ready until the entire batch is ready.
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
                let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
                        self.context.channel_state |= ChannelState::OurChannelReady as u32;
@@ -4671,7 +4953,7 @@ impl<SP: Deref> Channel<SP> where
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
-                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 {
+                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
                                // We should never see a funding transaction on-chain until we've received
                                // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
                                // an inbound channel - before that we have no known funding TXID). The fuzzer,
@@ -4708,12 +4990,13 @@ impl<SP: Deref> Channel<SP> where
        /// In the second, we simply return an Err indicating we need to be force-closed now.
        pub fn transactions_confirmed<NS: Deref, L: Deref>(
                &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
-               genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
+               chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
        ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
        where
                NS::Target: NodeSigner,
                L::Target: Logger
        {
+               let mut msgs = (None, None);
                if let Some(funding_txo) = self.context.get_funding_txo() {
                        for &(index_in_block, tx) in txdata.iter() {
                                // Check if the transaction is the expected funding transaction, and if it is,
@@ -4768,8 +5051,8 @@ impl<SP: Deref> Channel<SP> where
                                        // may have already happened for this block).
                                        if let Some(channel_ready) = self.check_get_channel_ready(height) {
                                                log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
-                                               let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
-                                               return Ok((Some(channel_ready), announcement_sigs));
+                                               let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
+                                               msgs = (Some(channel_ready), announcement_sigs);
                                        }
                                }
                                for inp in tx.input.iter() {
@@ -4780,7 +5063,7 @@ impl<SP: Deref> Channel<SP> where
                                }
                        }
                }
-               Ok((None, None))
+               Ok(msgs)
        }
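Collecting `msgs` instead of returning early means the remaining transactions in the block are still inspected (for example, for a spend of the funding output) even after a `channel_ready` has been produced. A condensed sketch of that accumulate-then-return shape:

// Condensed sketch: record the result but keep scanning the block.
fn scan(txs: &[u32], funding_txid: u32, spending_txid: u32) -> (Option<u32>, bool) {
    let mut channel_ready_for = None;
    let mut funding_spent = false;
    for &tx in txs {
        if tx == funding_txid {
            // previously the function returned here, skipping the rest of the block
            channel_ready_for = Some(tx);
        }
        if tx == spending_txid {
            // still detected now, even when it appears after the funding tx
            funding_spent = true;
        }
    }
    (channel_ready_for, funding_spent)
}

fn main() {
    assert_eq!(scan(&[1, 7, 9], 7, 9), (Some(7), true));
}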
 
        /// When a new block is connected, we check the height of the block against outbound holding
@@ -4795,19 +5078,19 @@ impl<SP: Deref> Channel<SP> where
        /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
        /// back.
        pub fn best_block_updated<NS: Deref, L: Deref>(
-               &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
+               &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
                node_signer: &NS, user_config: &UserConfig, logger: &L
        ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
        where
                NS::Target: NodeSigner,
                L::Target: Logger
        {
-               self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
+               self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
        }
 
        fn do_best_block_updated<NS: Deref, L: Deref>(
                &mut self, height: u32, highest_header_time: u32,
-               genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
+               chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
        ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
        where
                NS::Target: NodeSigner,
@@ -4833,15 +5116,15 @@ impl<SP: Deref> Channel<SP> where
                self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
 
                if let Some(channel_ready) = self.check_get_channel_ready(height) {
-                       let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
-                               self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
+                       let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
+                               self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
                        } else { None };
                        log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                        return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                }
 
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-               if non_shutdown_state >= ChannelState::ChannelReady as u32 ||
+               if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
                   (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
                        let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
                        if self.context.funding_tx_confirmation_height == 0 {
@@ -4869,13 +5152,13 @@ impl<SP: Deref> Channel<SP> where
                                height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
                        log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
                        // If funding_tx_confirmed_in is unset, the channel must not be active
-                       assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
+                       assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
                        assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
                        return Err(ClosureReason::FundingTimedOut);
                }
 
-               let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
-                       self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
+               let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
+                       self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
                } else { None };
                Ok((None, timed_out_htlcs, announcement_sigs))
        }
@@ -4892,7 +5175,7 @@ impl<SP: Deref> Channel<SP> where
                        // larger. If we don't know that time has moved forward, we can just set it to the last
                        // time we saw and it will be ignored.
                        let best_time = self.context.update_time_counter;
-                       match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
+                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
                                Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
                                        assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
@@ -4922,7 +5205,7 @@ impl<SP: Deref> Channel<SP> where
        ///
        /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
        fn get_channel_announcement<NS: Deref>(
-               &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
+               &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
        ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
                if !self.context.config.announced_channel {
                        return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
@@ -4953,7 +5236,7 @@ impl<SP: Deref> Channel<SP> where
        }
 
        fn get_announcement_sigs<NS: Deref, L: Deref>(
-               &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
+               &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
                best_block_height: u32, logger: &L
        ) -> Option<msgs::AnnouncementSignatures>
        where
@@ -4978,7 +5261,7 @@ impl<SP: Deref> Channel<SP> where
                }
 
                log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
-               let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
+               let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
                        Ok(a) => a,
                        Err(e) => {
                                log_trace!(logger, "{:?}", e);
@@ -5052,7 +5335,7 @@ impl<SP: Deref> Channel<SP> where
        /// channel_announcement message which we can broadcast and storing our counterparty's
        /// signatures for later reconstruction/rebroadcast of the channel_announcement.
        pub fn announcement_signatures<NS: Deref>(
-               &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
+               &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
                msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
        ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
                let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
@@ -5082,7 +5365,7 @@ impl<SP: Deref> Channel<SP> where
        /// Gets a signed channel_announcement for this channel, if we previously received an
        /// announcement_signatures from our counterparty.
        pub fn get_signed_channel_announcement<NS: Deref>(
-               &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
+               &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
        ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
                if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
                        return None;
@@ -5389,7 +5672,7 @@ impl<SP: Deref> Channel<SP> where
                                        }
 
                                        let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
-                                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
+                                               .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
                                        signature = res.0;
                                        htlc_signatures = res.1;
 
@@ -5443,17 +5726,20 @@ impl<SP: Deref> Channel<SP> where
                }
        }
 
-       pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
-               if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
-               }
-               self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
+       /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
+       /// happened.
+       pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
+               let new_forwarding_info = Some(CounterpartyForwardingInfo {
                        fee_base_msat: msg.contents.fee_base_msat,
                        fee_proportional_millionths: msg.contents.fee_proportional_millionths,
                        cltv_expiry_delta: msg.contents.cltv_expiry_delta
                });
+               let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
+               if did_change {
+                       self.context.counterparty_forwarding_info = new_forwarding_info;
+               }
 
-               Ok(())
+               Ok(did_change)
        }
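Since `channel_update` now reports whether anything changed, a caller can skip follow-up work for duplicate updates. A simplified sketch of that idea (stand-in types, not the real `ChannelUpdate`):

// Only act when channel_update reports a change.
#[derive(PartialEq)]
struct ForwardingInfo { fee_base_msat: u32 }

struct Chan { info: Option<ForwardingInfo> }

impl Chan {
    fn channel_update(&mut self, new: ForwardingInfo) -> bool {
        let new = Some(new);
        let did_change = self.info != new;
        if did_change { self.info = new; }
        did_change
    }
}

fn main() {
    let mut chan = Chan { info: None };
    assert!(chan.channel_update(ForwardingInfo { fee_base_msat: 1000 }));
    // Re-applying an identical update is a no-op the caller can skip persisting.
    assert!(!chan.channel_update(ForwardingInfo { fee_base_msat: 1000 }));
}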
 
        /// Begins the shutdown process, getting a message for the remote peer and returning all
@@ -5463,7 +5749,7 @@ impl<SP: Deref> Channel<SP> where
        /// [`ChannelMonitorUpdate`] will be returned).
        pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
                target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
        {
                for htlc in self.context.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
@@ -5489,7 +5775,7 @@ impl<SP: Deref> Channel<SP> where
                // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
                // script is set, we just force-close and call it a day.
                let mut chan_closed = false;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        chan_closed = true;
                }
 
@@ -5518,11 +5804,18 @@ impl<SP: Deref> Channel<SP> where
 
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+                       let shutdown_result = ShutdownResult {
+                               monitor_update: None,
+                               dropped_outbound_htlcs: Vec::new(),
+                               unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                       };
                        self.context.channel_state = ChannelState::ShutdownComplete as u32;
+                       Some(shutdown_result)
                } else {
                        self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
-               }
+                       None
+               };
                self.context.update_time_counter += 1;
 
                let monitor_update = if update_shutdown_script {
@@ -5558,7 +5851,7 @@ impl<SP: Deref> Channel<SP> where
                debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
                        "we can't both complete shutdown and return a monitor update");
 
-               Ok((shutdown, monitor_update, dropped_outbound_htlcs))
+               Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
        }
 
        pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
@@ -5584,7 +5877,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        pub fn new<ES: Deref, F: Deref>(
                fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
                channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
-               outbound_scid_alias: u64
+               outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
        ) -> Result<OutboundV1Channel<SP>, APIError>
        where ES::Target: EntropySource,
              F::Target: FeeEstimator
@@ -5617,16 +5910,16 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                let channel_type = Self::get_initial_channel_type(&config, their_features);
                debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
 
-               let commitment_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
-                       ConfirmationTarget::MempoolMinimum
+               let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+                       (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
                } else {
-                       ConfirmationTarget::Normal
+                       (ConfirmationTarget::NonAnchorChannelFee, 0)
                };
                let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
 
                let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
                let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
-               if value_to_self_msat < commitment_tx_fee {
+               if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
                        return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
                }
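As a rough worked example of the new anchor reserve check, assuming `ANCHOR_OUTPUT_VALUE_SATOSHI` is 330 sats (two anchors per commitment) and an illustrative commitment fee:

// Illustrative arithmetic only; the constant and the fee value are assumptions.
const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

fn main() {
    let channel_value_satoshis: u64 = 20_000;
    let push_msat: u64 = 0;
    let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; // 20_000_000 msat

    let anchor_outputs_value_msat = ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000; // 660_000 msat
    let commitment_tx_fee_msat: u64 = 1_500_000; // assumed fee for the example

    // Mirrors the check above: the funder must cover the commitment fee
    // *after* setting the two anchor output values aside.
    assert!(value_to_self_msat.saturating_sub(anchor_outputs_value_msat) >= commitment_tx_fee_msat);
}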
 
@@ -5651,7 +5944,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
                };
 
-               let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
+               let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
 
                Ok(Self {
                        context: ChannelContext {
@@ -5702,6 +5995,9 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                monitor_pending_failures: Vec::new(),
                                monitor_pending_finalized_fulfills: Vec::new(),
 
+                               signer_pending_commitment_update: false,
+                               signer_pending_funding: false,
+
                                #[cfg(debug_assertions)]
                                holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
                                #[cfg(debug_assertions)]
@@ -5741,6 +6037,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                        channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
+                               is_batch_funding: None,
 
                                counterparty_cur_commitment_point: None,
                                counterparty_prev_commitment_point: None,
@@ -5781,19 +6078,6 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                })
        }
 
-       /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
-       fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-               match &self.context.holder_signer {
-                       // TODO (taproot|arik): move match into calling method for Taproot
-                       ChannelSignerType::Ecdsa(ecdsa) => {
-                               Ok(ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
-                                       .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
-                       }
-               }
-       }
-
        /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
        /// a funding_created message for the remote peer.
        /// Panics if called at some time other than immediately after initial handshake, if called twice,
@@ -5801,8 +6085,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
-       -> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
+       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
+       -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
                }
@@ -5818,17 +6102,6 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
                self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
 
-               let signature = match self.get_funding_created_signature(logger) {
-                       Ok(res) => res,
-                       Err(e) => {
-                               log_error!(logger, "Got bad signatures: {:?}!", e);
-                               self.context.channel_transaction_parameters.funding_outpoint = None;
-                               return Err((self, e));
-                       }
-               };
-
-               let temporary_channel_id = self.context.channel_id;
-
                // Now that we're past error-generating stuff, update our local state:
 
                self.context.channel_state = ChannelState::FundingCreated as u32;
@@ -5843,21 +6116,21 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                }
 
                self.context.funding_transaction = Some(funding_transaction);
+               self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
+
+               let funding_created = self.context.get_funding_created_msg(logger);
+               if funding_created.is_none() {
+                       if !self.context.signer_pending_funding {
+                               log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+                               self.context.signer_pending_funding = true;
+                       }
+               }
 
                let channel = Channel {
                        context: self.context,
                };
 
-               Ok((channel, msgs::FundingCreated {
-                       temporary_channel_id,
-                       funding_txid: funding_txo.txid,
-                       funding_output_index: funding_txo.index,
-                       signature,
-                       #[cfg(taproot)]
-                       partial_signature_with_nonce: None,
-                       #[cfg(taproot)]
-                       next_local_nonce: None,
-               }))
+               Ok((channel, funding_created))
        }
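`get_funding_created` now returns `Option<msgs::FundingCreated>`, with `None` indicating that the signer has not produced a signature yet and `signer_pending_funding` has been set. A hypothetical caller-side sketch (names are illustrative):

// Hypothetical handling of the Option return: when None, no funding_created
// is sent now; it is expected to be produced later, once the signer unblocks.
struct FundingCreated;

fn on_funding_generated(msg: Option<FundingCreated>) {
    match msg {
        Some(_m) => {
            // send funding_created to the peer now
        }
        None => {
            // signer_pending_funding was set above; hold the channel and send
            // the message once the signer eventually provides the signature
        }
    }
}

fn main() { on_funding_generated(None); }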
 
        fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
@@ -5887,7 +6160,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        /// not of our ability to open any channel at all. Thus, on error, we should first call this
        /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
        pub(crate) fn maybe_handle_error_without_close<F: Deref>(
-               &mut self, chain_hash: BlockHash, fee_estimator: &LowerBoundedFeeEstimator<F>
+               &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
        ) -> Result<msgs::OpenChannel, ()>
        where
                F::Target: FeeEstimator
@@ -5908,7 +6181,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                // whatever reason.
                if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
                        self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
-                       self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
                        assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
                } else if self.context.channel_type.supports_scid_privacy() {
                        self.context.channel_type.clear_scid_privacy();
@@ -5919,7 +6192,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                Ok(self.get_open_channel(chain_hash))
        }
 
-       pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
+       pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
                if !self.context.is_outbound() {
                        panic!("Tried to open a channel for an inbound channel?");
                }
@@ -6241,13 +6514,18 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // check if the funder's amount for the initial commitment tx is sufficient
                // for full fee payment plus a few HTLCs to ensure the channel will be useful.
+               let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+                       ANCHOR_OUTPUT_VALUE_SATOSHI * 2
+               } else {
+                       0
+               };
                let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
                let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
-               if funders_amount_msat / 1000 < commitment_tx_fee {
-                       return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
+               if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
+                       return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
                }
 
-               let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
+               let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
                // While it's reasonable for us to not meet the channel reserve initially (if they don't
                // want to push much to us), our counterparty should always have more than our reserve.
                if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
@@ -6349,6 +6627,9 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                monitor_pending_failures: Vec::new(),
                                monitor_pending_finalized_fulfills: Vec::new(),
 
+                               signer_pending_commitment_update: false,
+                               signer_pending_funding: false,
+
                                #[cfg(debug_assertions)]
                                holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
                                #[cfg(debug_assertions)]
@@ -6392,6 +6673,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                        channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
+                               is_batch_funding: None,
 
                                counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
                                counterparty_prev_commitment_point: None,
@@ -6495,46 +6777,27 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                self.generate_accept_channel_message()
        }
 
-       fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
+       fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
                let funding_script = self.context.get_funding_redeemscript();
 
                let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
                let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
-               {
-                       let trusted_tx = initial_commitment_tx.trust();
-                       let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
-                       let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
-                       // They sign the holder commitment transaction...
-                       log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
-                               log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
-                               encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
-                               encode::serialize_hex(&funding_script), &self.context.channel_id());
-                       secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
-               }
+               let trusted_tx = initial_commitment_tx.trust();
+               let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+               let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+               // They sign the holder commitment transaction...
+               log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
+                       log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
+                       encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
+                       encode::serialize_hex(&funding_script), &self.context.channel_id());
+               secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
 
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-
-               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
-               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
-               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
-
-               match &self.context.holder_signer {
-                       // TODO (arik): move match into calling method for Taproot
-                       ChannelSignerType::Ecdsa(ecdsa) => {
-                               let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
-                                       .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
-
-                               // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
-                               Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature))
-                       }
-               }
+               Ok(initial_commitment_tx)
        }
 
        pub fn funding_created<L: Deref>(
                mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<(Channel<SP>, msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
+       ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
        where
                L::Target: Logger
        {
@@ -6556,10 +6819,10 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
                self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
                // This is an externally observable change before we finish all our checks.  In particular
-               // funding_created_signature may fail.
+               // check_funding_created_signature may fail.
                self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
 
-               let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
+               let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
                        Ok(res) => res,
                        Err(ChannelError::Close(e)) => {
                                self.context.channel_transaction_parameters.funding_outpoint = None;
@@ -6568,7 +6831,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                        Err(e) => {
                                // The only error we know how to handle is ChannelError::Close, so we fall over here
                                // to make sure we don't continue with an inconsistent state.
-                               panic!("unexpected error type from funding_created_signature {:?}", e);
+                               panic!("unexpected error type from check_funding_created_signature {:?}", e);
                        }
                };
 
@@ -6586,6 +6849,13 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // Now that we're past error-generating stuff, update our local state:
 
+               self.context.channel_state = ChannelState::FundingSent as u32;
+               self.context.channel_id = funding_txo.to_channel_id();
+               self.context.cur_counterparty_commitment_transaction_number -= 1;
+               self.context.cur_holder_commitment_transaction_number -= 1;
+
+               let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
+
                let funding_redeemscript = self.context.get_funding_redeemscript();
                let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
                let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
@@ -6602,38 +6872,28 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
-                       self.context.cur_counterparty_commitment_transaction_number,
+                       self.context.cur_counterparty_commitment_transaction_number + 1,
                        self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
                        counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
                        counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 
-               self.context.channel_state = ChannelState::FundingSent as u32;
-               self.context.channel_id = funding_txo.to_channel_id();
-               self.context.cur_counterparty_commitment_transaction_number -= 1;
-               self.context.cur_holder_commitment_transaction_number -= 1;
-
-               log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());
+               log_info!(logger, "{} funding_signed for peer for channel {}",
+                       if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
 
                // Promote the channel to a full-fledged one now that we have updated the state and have a
                // `ChannelMonitor`.
                let mut channel = Channel {
                        context: self.context,
                };
-               let channel_id = channel.context.channel_id.clone();
                let need_channel_ready = channel.check_get_channel_ready(0).is_some();
                channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 
-               Ok((channel, msgs::FundingSigned {
-                       channel_id,
-                       signature,
-                       #[cfg(taproot)]
-                       partial_signature_with_nonce: None,
-               }, channel_monitor))
+               Ok((channel, funding_signed, channel_monitor))
        }
 }
 
 const SERIALIZATION_VERSION: u8 = 3;
-const MIN_SERIALIZATION_VERSION: u8 = 2;
+const MIN_SERIALIZATION_VERSION: u8 = 3;
 
 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
        (0, FailRelay),
@@ -6713,14 +6973,6 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 
                self.context.latest_monitor_update_id.write(writer)?;
 
-               let mut key_data = VecWriter(Vec::new());
-               // TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
-               self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
-               assert!(key_data.0.len() < core::usize::MAX);
-               assert!(key_data.0.len() < core::u32::MAX as usize);
-               (key_data.0.len() as u32).write(writer)?;
-               writer.write_all(&key_data.0[..])?;
-
                // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
                // deserialized from that format.
                match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
@@ -7007,6 +7259,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (31, channel_pending_event_emitted, option),
                        (35, pending_outbound_skimmed_fees, optional_vec),
                        (37, holding_cell_skimmed_fees, optional_vec),
+                       (38, self.context.is_batch_funding, option),
                });
 
                Ok(())
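`is_batch_funding` is stored as an `Option<()>`, i.e. a presence-only flag: `Some(())` marks a batch-funded channel and `None` a normal one. A tiny sketch of deriving such a flag from a bool, mirroring the `Some(()).filter(..)` idiom used earlier in this diff:

fn main() {
    let is_batch_funding = true;
    // Presence-only flag: Some(()) means "part of a batch", None means not.
    let flag: Option<()> = Some(()).filter(|_| is_batch_funding);
    assert_eq!(flag.is_some(), is_batch_funding);
}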
@@ -7229,7 +7482,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                };
 
                let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
-               let funding_transaction = Readable::read(reader)?;
+               let funding_transaction: Option<Transaction> = Readable::read(reader)?;
 
                let counterparty_cur_commitment_point = Readable::read(reader)?;
 
@@ -7290,6 +7543,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
                let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
 
+               let mut is_batch_funding: Option<()> = None;
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -7315,6 +7570,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (31, channel_pending_event_emitted, option),
                        (35, pending_outbound_skimmed_fees_opt, optional_vec),
                        (37, holding_cell_skimmed_fees_opt, optional_vec),
+                       (38, is_batch_funding, option),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7322,7 +7578,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        // If we've gotten to the funding stage of the channel, populate the signer with its
                        // required channel parameters.
                        let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
-                       if non_shutdown_state >= (ChannelState::FundingCreated as u32) {
+                       if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
                                holder_signer.provide_channel_parameters(&channel_parameters);
                        }
                        (channel_keys_id, holder_signer)
@@ -7435,6 +7691,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                monitor_pending_failures,
                                monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
 
+                               signer_pending_commitment_update: false,
+                               signer_pending_funding: false,
+
                                pending_update_fee,
                                holding_cell_update_fee,
                                next_holder_htlc_id,
@@ -7472,6 +7731,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                                channel_transaction_parameters: channel_parameters,
                                funding_transaction,
+                               is_batch_funding,
 
                                counterparty_cur_commitment_point,
                                counterparty_prev_commitment_point,
@@ -7516,16 +7776,16 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 #[cfg(test)]
 mod tests {
        use std::cmp;
+       use bitcoin::blockdata::constants::ChainHash;
        use bitcoin::blockdata::script::{Script, Builder};
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
-       use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::blockdata::opcodes;
        use bitcoin::network::constants::Network;
        use hex;
        use crate::ln::PaymentHash;
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::ln::channel::InitFeatures;
-       use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
+       use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use crate::ln::features::ChannelTypeFeatures;
        use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
@@ -7634,7 +7894,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
+               match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
                        Err(APIError::IncompatibleShutdownScript { script }) => {
                                assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
                        },
@@ -7657,12 +7917,12 @@ mod tests {
 
                let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
 
                // Now change the fee so we can check that the fee in the open_channel message is the
                // same as the old fee.
                fee_est.fee_est = 500;
-               let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
        }
 
@@ -7684,11 +7944,11 @@ mod tests {
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
-               let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
 
@@ -7704,11 +7964,11 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
-               let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
+               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 
                // Put some inbound and outbound HTLCs in A's channel.
                let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
@@ -7765,7 +8025,7 @@ mod tests {
 
                let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
 
                let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
                let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
@@ -7806,7 +8066,7 @@ mod tests {
                let seed = [42; 32];
                let network = Network::Testnet;
                let best_block = BestBlock::from_network(network);
-               let chain_hash = best_block.block_hash();
+               let chain_hash = ChainHash::using_genesis_block(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
 
                // Go through the flow of opening a channel between two nodes.
@@ -7814,7 +8074,7 @@ mod tests {
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
@@ -7831,15 +8091,15 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
-               let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
+               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 
                // Now disconnect the two nodes and check that the commitment point in
                // Node B's channel_reestablish message is sane.
-               node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+               assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
                let msg = node_b_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
@@ -7847,7 +8107,7 @@ mod tests {
 
                // Check that the commitment point in Node A's channel_reestablish message
                // is sane.
-               node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+               assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
                let msg = node_a_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
@@ -7877,16 +8137,16 @@ mod tests {
                // Test that `OutboundV1Channel::new` creates a channel with the correct value for
                // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
                // which is set to the lower bound + 1 (2%) of the `channel_value`.
-               let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
+               let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
                let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
                assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
 
                // Test with the upper bound - 1 of valid values (99%).
-               let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
+               let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
                let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
                assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
 
-               let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
+               let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
 
                // Test that `InboundV1Channel::new` creates a channel with the correct value for
                // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
@@ -7902,14 +8162,14 @@ mod tests {
 
                // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
                // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
-               let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
+               let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
                let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
                assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
 
                // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
                // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
                // than 100.
-               let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
+               let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
                let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
                assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
 
@@ -7962,12 +8222,12 @@ mod tests {
 
                let mut outbound_node_config = UserConfig::default();
                outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
-               let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
+               let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
 
                let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
                assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
 
-               let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
+               let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
                let mut inbound_node_config = UserConfig::default();
                inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
 
@@ -7993,17 +8253,17 @@ mod tests {
                let seed = [42; 32];
                let network = Network::Testnet;
                let best_block = BestBlock::from_network(network);
-               let chain_hash = genesis_block(network).header.block_hash();
+               let chain_hash = ChainHash::using_genesis_block(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
 
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
-               let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
 
@@ -8019,11 +8279,11 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
-               let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
+               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 
                // Make sure that receiving a channel update will update the Channel as expected.
                let update = ChannelUpdate {
@@ -8041,7 +8301,7 @@ mod tests {
                        },
                        signature: Signature::from(unsafe { FFISignature::new() })
                };
-               node_a_chan.channel_update(&update).unwrap();
+               assert!(node_a_chan.channel_update(&update).unwrap());
 
                // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
                // change our official htlc_minimum_msat.
@@ -8054,6 +8314,8 @@ mod tests {
                        },
                        None => panic!("expected counterparty forwarding info to be Some")
                }
+
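+               // Re-applying the identical update should be a no-op, which channel_update signals by returning false.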
+               assert!(!node_a_chan.channel_update(&update).unwrap());
        }
 
        #[cfg(feature = "_test_vectors")]
@@ -8065,7 +8327,7 @@ mod tests {
                use bitcoin::hashes::hex::FromHex;
                use bitcoin::hash_types::Txid;
                use bitcoin::secp256k1::Message;
-               use crate::sign::EcdsaChannelSigner;
+               use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
                use crate::ln::PaymentPreimage;
                use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
                use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
@@ -8099,7 +8361,7 @@ mod tests {
                let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let mut config = UserConfig::default();
                config.channel_handshake_config.announced_channel = false;
-               let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
+               let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
                chan.context.holder_dust_limit_satoshis = 546;
                chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
 
@@ -8190,7 +8452,7 @@ mod tests {
                                        &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
                                        chan.context.counterparty_funding_pubkey()
                                );
-                               let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap();
+                               let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
                                assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
 
                                let funding_redeemscript = chan.context.get_funding_redeemscript();
@@ -8198,14 +8460,14 @@ mod tests {
                                assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
 
                                // ((htlc, counterparty_sig), (index, holder_sig))
-                               let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate());
+                               let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
 
                                $({
                                        log_trace!(logger, "verifying htlc {}", $htlc_idx);
                                        let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
 
                                        let ref htlc = htlcs[$htlc_idx];
-                                       let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
+                                       let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
                                                chan.context.get_counterparty_selected_contest_delay().unwrap(),
                                                &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
@@ -8225,20 +8487,32 @@ mod tests {
                                                assert!(preimage.is_some());
                                        }
 
-                                       let htlc_sig = htlc_sig_iter.next().unwrap();
+                                       let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
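+                                       // Obtain the holder's HTLC signature from the signer via an HTLCDescriptor,
+                                       // replacing the old pre-batched htlc_sigs list.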
+                                       let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
+                                               channel_derivation_parameters: ChannelDerivationParameters {
+                                                       value_satoshis: chan.context.channel_value_satoshis,
+                                                       keys_id: chan.context.channel_keys_id,
+                                                       transaction_parameters: chan.context.channel_transaction_parameters.clone(),
+                                               },
+                                               commitment_txid: trusted_tx.txid(),
+                                               per_commitment_number: trusted_tx.commitment_number(),
+                                               per_commitment_point: trusted_tx.per_commitment_point(),
+                                               feerate_per_kw: trusted_tx.feerate_per_kw(),
+                                               htlc: htlc.clone(),
+                                               preimage: preimage.clone(),
+                                               counterparty_sig: *htlc_counterparty_sig,
+                                       }, &secp_ctx).unwrap();
                                        let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
-                                       assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
+                                       assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
 
                                        let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
-                                       assert_eq!(signature, *(htlc_sig.1).1, "htlc sig");
-                                       let index = (htlc_sig.1).0;
-                                       let channel_parameters = chan.context.channel_transaction_parameters.as_holder_broadcastable();
+                                       assert_eq!(signature, htlc_holder_sig, "htlc sig");
                                        let trusted_tx = holder_commitment_tx.trust();
-                                       log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))));
-                                       assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..],
-                                                       hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
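+                                       // Assemble the signed HTLC transaction by building its witness directly from the
+                                       // counterparty and holder signatures.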
+                                       htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
+                                       log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
+                                       assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
                                })*
-                               assert!(htlc_sig_iter.next().is_none());
+                               assert!(htlc_counterparty_sig_iter.next().is_none());
                        } }
                }
 
@@ -8833,12 +9107,12 @@ mod tests {
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
-                       node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+                       node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
 
                let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
                channel_type_features.set_zero_conf_required();
 
-               let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                open_channel_msg.channel_type = Some(channel_type_features);
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
@@ -8868,7 +9142,7 @@ mod tests {
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
-                       &config, 0, 42
+                       &config, 0, 42, None
                ).unwrap();
                assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
 
@@ -8878,10 +9152,11 @@ mod tests {
 
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
-                       &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
+                       &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
+                       None
                ).unwrap();
 
-               let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
+               let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
                let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                        &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
@@ -8915,11 +9190,12 @@ mod tests {
 
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
-                       &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
+                       &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
+                       None
                ).unwrap();
 
                // Set `channel_type` to `None` to force the implicit feature negotiation.
-               let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
+               let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
                open_channel_msg.channel_type = None;
 
                // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
@@ -8961,10 +9237,11 @@ mod tests {
                // B as it's not supported by LDK.
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
-                       &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
+                       &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
+                       None
                ).unwrap();
 
-               let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
+               let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
                open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
 
                let res = InboundV1Channel::<&TestKeysInterface>::new(
@@ -8980,10 +9257,10 @@ mod tests {
                // LDK.
                let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
-                       10000000, 100000, 42, &config, 0, 42
+                       10000000, 100000, 42, &config, 0, 42, None
                ).unwrap();
 
-               let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
+               let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
 
                let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
@@ -8999,4 +9276,147 @@ mod tests {
                );
                assert!(res.is_err());
        }
+
+       #[test]
+       fn test_waiting_for_batch() {
+               let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+               let logger = test_utils::TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let best_block = BestBlock::from_network(network);
+               let chain_hash = ChainHash::using_genesis_block(network);
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+               let mut config = UserConfig::default();
+               // Set trust_own_funding_0conf so we can verify that channel_ready is still withheld for a
+               // channel funded in a batch until every channel in the batch is ready.
+               config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+               // Create a channel from node A to node B that will be funded as part of a batch.
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest,
+                       &&keys_provider,
+                       &&keys_provider,
+                       node_b_node_id,
+                       &channelmanager::provided_init_features(&config),
+                       10000000,
+                       100000,
+                       42,
+                       &config,
+                       0,
+                       42,
+                       None
+               ).unwrap();
+
+               let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+               let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest,
+                       &&keys_provider,
+                       &&keys_provider,
+                       node_b_node_id,
+                       &channelmanager::provided_channel_type_features(&config),
+                       &channelmanager::provided_init_features(&config),
+                       &open_channel_msg,
+                       7,
+                       &config,
+                       0,
+                       &&logger,
+                       true,  // Allow node B to send a 0conf channel_ready.
+               ).unwrap();
+
+               let accept_channel_msg = node_b_chan.accept_inbound_channel();
+               node_a_chan.accept_channel(
+                       &accept_channel_msg,
+                       &config.channel_handshake_limits,
+                       &channelmanager::provided_init_features(&config),
+               ).unwrap();
+
+               // Fund the channel with a batch funding transaction.
+               let output_script = node_a_chan.context.get_funding_redeemscript();
+               let tx = Transaction {
+                       version: 1,
+                       lock_time: PackedLockTime::ZERO,
+                       input: Vec::new(),
+                       output: vec![
+                               TxOut {
+                                       value: 10000000, script_pubkey: output_script.clone(),
+                               },
+                               TxOut {
+                                       value: 10000000, script_pubkey: Builder::new().into_script(),
+                               },
+                       ]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
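+               // The `true` here marks the channel as funded by a batch transaction shared with other channels.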
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
+                       tx.clone(),
+                       funding_outpoint,
+                       true,
+                       &&logger,
+               ).map_err(|_| ()).unwrap();
+               let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
+                       &funding_created_msg.unwrap(),
+                       best_block,
+                       &&keys_provider,
+                       &&logger,
+               ).map_err(|_| ()).unwrap();
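+               // Node B accepted the channel as 0conf, so restoring its monitor yields a channel_ready we
+               // can deliver to node A below.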
+               let node_b_updates = node_b_chan.monitor_updating_restored(
+                       &&logger,
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       0,
+               );
+
+               // Receive funding_signed, but the channel is configured to hold off on sending channel_ready
+               // and broadcasting the funding transaction until the whole batch is ready.
+               let _ = node_a_chan.funding_signed(
+                       &funding_signed_msg.unwrap(),
+                       best_block,
+                       &&keys_provider,
+                       &&logger,
+               ).unwrap();
+               let node_a_updates = node_a_chan.monitor_updating_restored(
+                       &&logger,
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       0,
+               );
+               // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
+               // because the shared funding transaction can't be broadcast until every channel in the batch is ready.
+               assert!(node_a_updates.channel_ready.is_none());
+               assert!(node_a_updates.funding_broadcastable.is_none());
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::WaitingForBatch as u32,
+               );
+
+               // Receiving a 0conf channel_ready from the remote node is still possible while we wait for the batch.
+               node_a_chan.channel_ready(
+                       &node_b_updates.channel_ready.unwrap(),
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       &best_block,
+                       &&logger,
+               ).unwrap();
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::WaitingForBatch as u32 |
+                       ChannelState::TheirChannelReady as u32,
+               );
+
+               // ChannelState::WaitingForBatch is only cleared when ChannelManager reports the batch as
+               // ready by calling set_batch_ready.
+               node_a_chan.set_batch_ready();
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::TheirChannelReady as u32,
+               );
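+               // With WaitingForBatch cleared, node A can finally generate its own channel_ready.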
+               assert!(node_a_chan.check_get_channel_ready(0).is_some());
+       }
 }