Fix deadlock when closing an unavailable channel
[rust-lightning] / lightning / src / ln / channel.rs
index da1364021cefe99e9f1b63e2ecd215d8f67b438e..a61a8de82debecd5906c0ee95bdcdb420aae48f4 100644
@@ -66,6 +66,8 @@ pub struct ChannelValueStat {
 }
 
 pub struct AvailableBalances {
+       /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
+       pub balance_msat: u64,
        /// Total amount available for our counterparty to send to us.
        pub inbound_capacity_msat: u64,
        /// Total amount available for us to send to our counterparty.
@@ -300,9 +302,24 @@ enum ChannelState {
        /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
        /// to drop us, but we store this anyway.
        ShutdownComplete = 4096,
+       /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
+       /// broadcasting of the funding transaction is being held until all channels in the batch
+       /// have received funding_signed and have their monitors persisted.
+       WaitingForBatch = 1 << 13,
 }
-const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
+const BOTH_SIDES_SHUTDOWN_MASK: u32 =
+       ChannelState::LocalShutdownSent as u32 |
+       ChannelState::RemoteShutdownSent as u32;
+const MULTI_STATE_FLAGS: u32 =
+       BOTH_SIDES_SHUTDOWN_MASK |
+       ChannelState::PeerDisconnected as u32 |
+       ChannelState::MonitorUpdateInProgress as u32;
+const STATE_FLAGS: u32 =
+       MULTI_STATE_FLAGS |
+       ChannelState::TheirChannelReady as u32 |
+       ChannelState::OurChannelReady as u32 |
+       ChannelState::AwaitingRemoteRevoke as u32 |
+       ChannelState::WaitingForBatch as u32;
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
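The new `WaitingForBatch` bit (1 << 13) is numerically larger than every sequence state in `ChannelState` (including `ShutdownComplete` = 4096), so once it can be OR'd into `channel_state`, ordered comparisons such as `channel_state >= ChannelState::FundingSent as u32` would succeed spuriously unless the flag bits are masked off first. That is what `STATE_FLAGS`, and the `& !STATE_FLAGS` masking added throughout the later hunks, are for. A minimal self-contained sketch of the pattern; the constants below are illustrative stand-ins, not the real enum values:

// Stand-in constants for the masking pattern; not the actual ChannelState values.
const FUNDING_SENT: u32 = 8;              // a "sequence" state
const CHANNEL_READY: u32 = 64;            // a later sequence state
const WAITING_FOR_BATCH: u32 = 1 << 13;   // a flag OR'd onto FundingSent
const STATE_FLAGS: u32 = WAITING_FOR_BATCH; // the real mask ORs in every flag bit

// Ordered comparisons only make sense on the sequence part of the state,
// so the flag bits have to be stripped first.
fn is_at_least(state: u32, sequence_state: u32) -> bool {
    state & !STATE_FLAGS >= sequence_state
}

fn main() {
    let state = FUNDING_SENT | WAITING_FOR_BATCH;
    assert!(is_at_least(state, FUNDING_SENT));
    // Without the mask, 8 | (1 << 13) compares as ">= CHANNEL_READY" even though
    // the channel is nowhere near ready.
    assert!(state >= CHANNEL_READY);
    assert!(!is_at_least(state, CHANNEL_READY));
}
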
@@ -527,12 +544,15 @@ pub(super) struct ReestablishResponses {
 
 /// The return type of `force_shutdown`
 ///
-/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
-/// followed by a list of HTLCs to fail back in the form of the (source, payment hash, and this
-/// channel's counterparty_node_id and channel_id).
+/// Contains a tuple with the following:
+/// - An optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
+/// - A list of HTLCs to fail back, given as (source, payment hash, this channel's
+/// counterparty_node_id, channel_id) tuples.
+/// - An optional transaction id identifying a corresponding batch funding transaction.
 pub(crate) type ShutdownResult = (
        Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
-       Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>
+       Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
+       Option<Txid>
 );
 
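A hedged sketch of how a caller might consume the widened tuple; the stand-in type aliases and the body of `handle_shutdown` are illustrative assumptions, not the actual ChannelManager handling. The third element matters because the funding transaction of a batch channel may never have been broadcast, in which case the other channels funded by that same transaction are presumably affected as well:

// Illustrative stand-ins so the sketch compiles on its own.
type Txid = [u8; 32];
type MonitorUpdatePart = ();
type FailedHtlc = ();
type ShutdownResult = (Option<MonitorUpdatePart>, Vec<FailedHtlc>, Option<Txid>);

fn handle_shutdown(res: ShutdownResult) {
    let (monitor_update, failed_htlcs, unbroadcasted_batch_funding_txid) = res;
    if let Some(_update) = monitor_update {
        // hand the ChannelMonitorUpdate to the chain monitor
    }
    for _htlc in failed_htlcs {
        // fail the HTLC back toward its source
    }
    if let Some(_txid) = unbroadcasted_batch_funding_txid {
        // The shared funding transaction was never broadcast, so the sibling
        // channels in the same batch need attention too.
    }
}

fn main() {
    handle_shutdown((None, Vec::new(), Some([0u8; 32])));
}
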
 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
@@ -594,6 +614,9 @@ pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 /// exceeding this age limit will be force-closed and purged from memory.
 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
 
+/// Number of blocks needed for an output from a coinbase transaction to be spendable.
+pub(crate) const COINBASE_MATURITY: u32 = 100;
+
 struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
 }
@@ -602,6 +625,35 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
 });
 
+/// The `ChannelPhase` enum describes the current phase in the life of a lightning channel, with each of
+/// its variants containing an appropriate channel struct.
+pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
+       UnfundedOutboundV1(OutboundV1Channel<SP>),
+       UnfundedInboundV1(InboundV1Channel<SP>),
+       Funded(Channel<SP>),
+}
+
+impl<'a, SP: Deref> ChannelPhase<SP> where
+       SP::Target: SignerProvider,
+       <SP::Target as SignerProvider>::Signer: ChannelSigner,
+{
+       pub fn context(&'a self) -> &'a ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(chan) => &chan.context,
+                       ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
+                       ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+               }
+       }
+
+       pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+               }
+       }
+}
+
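A stripped-down sketch of the dispatch this enum enables; the types below are stand-ins for `OutboundV1Channel`, `InboundV1Channel` and `Channel`, all of which expose a `ChannelContext`. The point is that a single collection can hold channels in any phase while callers still reach the shared context through `context()`/`context_mut()`:

// Stripped-down stand-ins, not the real rust-lightning types.
struct Context { channel_id: u64 }
struct UnfundedOutbound { context: Context }
struct Funded { context: Context }

enum Phase {
    UnfundedOutboundV1(UnfundedOutbound),
    Funded(Funded),
}

impl Phase {
    fn context(&self) -> &Context {
        match self {
            Phase::UnfundedOutboundV1(chan) => &chan.context,
            Phase::Funded(chan) => &chan.context,
        }
    }
}

fn main() {
    let phase = Phase::Funded(Funded { context: Context { channel_id: 42 } });
    // Phase-agnostic accessors let one map hold every channel with a peer and
    // still read common state such as the channel id.
    assert_eq!(phase.context().channel_id, 42);
}
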
 /// Contains all state common to unfunded inbound/outbound channels.
 pub(super) struct UnfundedChannelContext {
        /// A counter tracking how many ticks have elapsed since this unfunded channel was
@@ -789,6 +841,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
        pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
        funding_transaction: Option<Transaction>,
+       is_batch_funding: Option<()>,
 
        counterparty_cur_commitment_point: Option<PublicKey>,
        counterparty_prev_commitment_point: Option<PublicKey>,
@@ -913,7 +966,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns true if we've ever received a message from the remote end for this Channel
        pub fn have_received_message(&self) -> bool {
-               self.channel_state > (ChannelState::OurInitSent as u32)
+               self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
        }
 
        /// Returns true if this channel is fully established and not known to be closing.
@@ -1129,7 +1182,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        // Checks whether we should emit a `ChannelPending` event.
        pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
-               self.is_funding_initiated() && !self.channel_pending_event_emitted
+               self.is_funding_broadcast() && !self.channel_pending_event_emitted
        }
 
        // Returns whether we already emitted a `ChannelPending` event.
@@ -1188,9 +1241,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                did_channel_update
        }
 
-       /// Returns true if funding_created was sent/received.
-       pub fn is_funding_initiated(&self) -> bool {
-               self.channel_state >= ChannelState::FundingSent as u32
+       /// Returns true if funding_signed was sent/received and the
+       /// funding transaction has been broadcast if necessary.
+       pub fn is_funding_broadcast(&self) -> bool {
+               self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
+                       self.channel_state & ChannelState::WaitingForBatch as u32 == 0
        }
 
        /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
@@ -1602,6 +1657,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let inbound_stats = context.get_inbound_pending_htlc_stats(None);
                let outbound_stats = context.get_outbound_pending_htlc_stats(None);
 
+               let mut balance_msat = context.value_to_self_msat;
+               for ref htlc in context.pending_inbound_htlcs.iter() {
+                       if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
+                               balance_msat += htlc.amount_msat;
+                       }
+               }
+               balance_msat -= outbound_stats.pending_htlcs_value_msat;
+
                let outbound_capacity_msat = context.value_to_self_msat
                                .saturating_sub(outbound_stats.pending_htlcs_value_msat)
                                .saturating_sub(
@@ -1718,6 +1781,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        outbound_capacity_msat,
                        next_outbound_htlc_limit_msat: available_capacity_msat,
                        next_outbound_htlc_minimum_msat,
+                       balance_msat,
                }
        }
 
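A small numeric sketch of the `balance_msat` formula introduced above, with made-up values: inbound HTLCs we are already in the middle of claiming still count toward our balance, while everything pending outbound is subtracted.

fn main() {
    let value_to_self_msat: u64 = 1_000_000;
    // An inbound HTLC we have already fulfilled (LocalRemoved/Fulfill) but whose
    // removal has not yet been committed: its value is effectively ours.
    let inbound_being_claimed_msat: u64 = 50_000;
    // Everything we currently have in flight toward the counterparty.
    let pending_outbound_msat: u64 = 200_000;

    let balance_msat = value_to_self_msat + inbound_being_claimed_msat - pending_outbound_msat;
    assert_eq!(balance_msat, 850_000);
    // Per its documentation, this figure ignores on-chain fees, unlike the
    // capacity fields computed below it.
}
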
@@ -1920,15 +1984,41 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                res
        }
 
-       /// Returns transaction if there is pending funding transaction that is yet to broadcast
-       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
-               if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
-                       self.funding_transaction.clone()
+       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
+               where F: Fn() -> Option<O> {
+               if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
+                  self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
+                       f()
                } else {
                        None
                }
        }
 
+       /// Returns the transaction if there is a pending funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+               self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
+       }
+
+       /// Returns the transaction ID if there is a pending funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
+               self.if_unbroadcasted_funding(||
+                       self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
+               )
+       }
+
+       /// Returns whether the channel is funded in a batch.
+       pub fn is_batch_funding(&self) -> bool {
+               self.is_batch_funding.is_some()
+       }
+
+       /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
+               self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
+       }
+
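The `is_batch_funding: Option<()>` field and the `.filter(...)` chaining above rely on a small `Option` idiom; a standalone sketch with plain values (the u64 txid is a stand-in for a real `Txid`). Keeping the flag as `Option<()>` is also what lets it be written directly with the `option` TLV serialization used at record 38 further down:

fn main() {
    // `Some(())` acts as a unit flag, and `filter` turns a bool into that flag.
    let is_batch_funding = true;
    let flag: Option<()> = Some(()).filter(|_| is_batch_funding);
    assert!(flag.is_some());

    // The same shape derives the batch funding txid: keep the unbroadcasted
    // funding txid only when the channel is batch funded.
    let unbroadcasted_funding_txid: Option<u64> = Some(0xfeed); // stand-in for a Txid
    let batch_txid = unbroadcasted_funding_txid.filter(|_| flag.is_some());
    assert_eq!(batch_txid, Some(0xfeed));
}
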
        /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
        /// shutdown of this channel - no more calls into this Channel may be made afterwards except
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
@@ -1969,10 +2059,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                }))
                        } else { None }
                } else { None };
+               let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
 
                self.channel_state = ChannelState::ShutdownComplete as u32;
                self.update_time_counter += 1;
-               (monitor_update, dropped_outbound_htlcs)
+               (monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid)
        }
 }
 
@@ -2033,11 +2124,6 @@ fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_featur
        (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
 }
 
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_funding_signed on an
-// inbound channel.
-//
 // Holder designates channel data owned for the benefit of the user client.
 // Counterparty designates channel data owned by another channel participant entity.
 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
@@ -2547,7 +2633,11 @@ impl<SP: Deref> Channel<SP> where
                        counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 
                assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
-               self.context.channel_state = ChannelState::FundingSent as u32;
+               if self.context.is_batch_funding() {
+                       self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
+               } else {
+                       self.context.channel_state = ChannelState::FundingSent as u32;
+               }
                self.context.cur_holder_commitment_transaction_number -= 1;
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
@@ -2558,6 +2648,15 @@ impl<SP: Deref> Channel<SP> where
                Ok(channel_monitor)
        }
 
+       /// Updates the state of the channel to indicate that all channels in the batch have received
+       /// funding_signed and persisted their monitors.
+       /// The funding transaction is consequently allowed to be broadcast, and the channel can be
+       /// treated as a non-batch channel going forward.
+       pub fn set_batch_ready(&mut self) {
+               self.context.is_batch_funding = None;
+               self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
+       }
+
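`set_batch_ready` only does something useful together with an outside coordinator that knows when every channel in the batch has received funding_signed and persisted its monitor. A hedged, self-contained sketch of that bookkeeping; the types and the exact trigger are assumptions for illustration, not the real ChannelManager code:

use std::collections::HashMap;

// Illustrative stand-ins, not rust-lightning types.
type Txid = u64;
struct BatchChannel { waiting_for_batch: bool }
impl BatchChannel {
    fn set_batch_ready(&mut self) { self.waiting_for_batch = false; }
}

struct BatchState { channels: Vec<BatchChannel>, persisted_monitors: usize }

fn monitor_persisted(batches: &mut HashMap<Txid, BatchState>, funding_txid: Txid) {
    if let Some(batch) = batches.get_mut(&funding_txid) {
        batch.persisted_monitors += 1;
        if batch.persisted_monitors == batch.channels.len() {
            for chan in batch.channels.iter_mut() {
                chan.set_batch_ready();
            }
            // Only at this point would the shared funding transaction be
            // broadcast, exactly once for the whole batch.
        }
    }
}

fn main() {
    let mut batches: HashMap<Txid, BatchState> = HashMap::new();
    batches.insert(7, BatchState {
        channels: vec![
            BatchChannel { waiting_for_batch: true },
            BatchChannel { waiting_for_batch: true },
        ],
        persisted_monitors: 0,
    });
    monitor_persisted(&mut batches, 7);
    assert!(batches[&7].channels.iter().all(|c| c.waiting_for_batch));
    monitor_persisted(&mut batches, 7);
    assert!(batches[&7].channels.iter().all(|c| !c.waiting_for_batch));
}
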
        /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
        /// and the channel is now usable (and public), this may generate an announcement_signatures to
        /// reply with.
@@ -2585,7 +2684,13 @@ impl<SP: Deref> Channel<SP> where
 
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
 
-               if non_shutdown_state == ChannelState::FundingSent as u32 {
+               // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
+               // batch, but we can receive channel_ready messages.
+               debug_assert!(
+                       non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
+                       non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
+               );
+               if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
                        self.context.channel_state |= ChannelState::TheirChannelReady as u32;
                } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
                        self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
@@ -3084,7 +3189,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
+               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
                   (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
@@ -3558,17 +3663,17 @@ impl<SP: Deref> Channel<SP> where
        /// resent.
        /// No further message handling calls may be made until a channel_reestablish dance has
        /// completed.
-       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L)  where L::Target: Logger {
+       /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
+       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
                assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
-                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
-                       return;
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+                       return Err(());
                }
 
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
                        // While the below code should be idempotent, it's simpler to just return early, as
                        // redundant disconnect events can fire, though they should be rare.
-                       return;
+                       return Ok(());
                }
 
                if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
@@ -3629,6 +3734,7 @@ impl<SP: Deref> Channel<SP> where
 
                self.context.channel_state |= ChannelState::PeerDisconnected as u32;
                log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
+               Ok(())
        }
 
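The switch to returning `Result` moves the pre-funding teardown out to the caller: per the new doc comment, `Err(())` means `force_shutdown` should be called immediately, rather than this method silently flipping the state to `ShutdownComplete` as the removed lines did. A simplified sketch of that calling pattern, with stand-in types rather than the real peer-disconnect handling:

// Simplified stand-ins for the calling pattern; not the real ChannelManager code.
struct Chan { shutdown: bool }
impl Chan {
    // Mirrors remove_uncommitted_htlcs_and_mark_paused: Err(()) means the channel
    // never reached FundingSent and should be torn down by the caller.
    fn mark_paused(&mut self, funded: bool) -> Result<(), ()> {
        if funded { Ok(()) } else { Err(()) }
    }
    fn force_shutdown(&mut self) { self.shutdown = true; }
}

fn on_peer_disconnect(chan: &mut Chan, funded: bool) {
    if chan.mark_paused(funded).is_err() {
        // The caller owns the teardown, so failing HTLCs, emitting events and
        // removing the channel all happen in one place.
        chan.force_shutdown();
    }
}

fn main() {
    let mut unfunded = Chan { shutdown: false };
    on_peer_disconnect(&mut unfunded, false);
    assert!(unfunded.shutdown);
}
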
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
@@ -3674,12 +3780,12 @@ impl<SP: Deref> Channel<SP> where
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
                // first received the funding_signed.
                let mut funding_broadcastable =
-                       if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+                       if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
                                self.context.funding_transaction.take()
                        } else { None };
                // That said, if the funding transaction is already confirmed (ie we're active with a
                // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
-               if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
                        funding_broadcastable = None;
                }
 
@@ -4182,7 +4288,7 @@ impl<SP: Deref> Channel<SP> where
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
@@ -4576,7 +4682,7 @@ impl<SP: Deref> Channel<SP> where
        pub fn is_awaiting_initial_mon_persist(&self) -> bool {
                if !self.is_awaiting_monitor_update() { return false; }
                if self.context.channel_state &
-                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
+                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
                                == ChannelState::FundingSent as u32 {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
                        // FundingSent set, though our peer could have sent their channel_ready.
@@ -4607,7 +4713,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Returns true if our channel_ready has been sent
        pub fn is_our_channel_ready(&self) -> bool {
-               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32
+               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
        }
 
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
@@ -4656,6 +4762,8 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
+               // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
+               // channel_ready until the entire batch is ready.
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
                let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
                        self.context.channel_state |= ChannelState::OurChannelReady as u32;
@@ -4668,7 +4776,7 @@ impl<SP: Deref> Channel<SP> where
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
-                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 {
+                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
                                // We should never see a funding transaction on-chain until we've received
                                // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
                                // an inbound channel - before that we have no known funding TXID). The fuzzer,
@@ -4711,6 +4819,7 @@ impl<SP: Deref> Channel<SP> where
                NS::Target: NodeSigner,
                L::Target: Logger
        {
+               let mut msgs = (None, None);
                if let Some(funding_txo) = self.context.get_funding_txo() {
                        for &(index_in_block, tx) in txdata.iter() {
                                // Check if the transaction is the expected funding transaction, and if it is,
@@ -4734,12 +4843,14 @@ impl<SP: Deref> Channel<SP> where
                                                        return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
                                                } else {
                                                        if self.context.is_outbound() {
-                                                               for input in tx.input.iter() {
-                                                                       if input.witness.is_empty() {
-                                                                               // We generated a malleable funding transaction, implying we've
-                                                                               // just exposed ourselves to funds loss to our counterparty.
-                                                                               #[cfg(not(fuzzing))]
-                                                                               panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+                                                               if !tx.is_coin_base() {
+                                                                       for input in tx.input.iter() {
+                                                                               if input.witness.is_empty() {
+                                                                                       // We generated a malleable funding transaction, implying we've
+                                                                                       // just exposed ourselves to funds loss to our counterparty.
+                                                                                       #[cfg(not(fuzzing))]
+                                                                                       panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+                                                                               }
                                                                        }
                                                                }
                                                        }
@@ -4750,6 +4861,13 @@ impl<SP: Deref> Channel<SP> where
                                                                Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
                                                        }
                                                }
+                                               // If this is a coinbase transaction and not a 0-conf channel,
+                                               // we should update our minimum_depth to 100 to handle coinbase maturity.
+                                               if tx.is_coin_base() &&
+                                                       self.context.minimum_depth.unwrap_or(0) > 0 &&
+                                                       self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+                                                       self.context.minimum_depth = Some(COINBASE_MATURITY);
+                                               }
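The coinbase rule added here (and repeated in get_funding_created further down) reads naturally as a small pure function. A sketch using the COINBASE_MATURITY constant defined above; the helper name is illustrative, not part of the patch:

const COINBASE_MATURITY: u32 = 100;

// 0-conf channels (a configured depth of 0, or no configured depth, treated as 0
// here) are left alone; any other depth below 100 is raised to coinbase maturity.
fn adjusted_minimum_depth(is_coinbase: bool, minimum_depth: Option<u32>) -> Option<u32> {
    let depth = minimum_depth.unwrap_or(0);
    if is_coinbase && depth > 0 && depth < COINBASE_MATURITY {
        Some(COINBASE_MATURITY)
    } else {
        minimum_depth
    }
}

fn main() {
    assert_eq!(adjusted_minimum_depth(true, Some(3)), Some(100));
    assert_eq!(adjusted_minimum_depth(true, Some(0)), Some(0));   // 0-conf stays 0-conf
    assert_eq!(adjusted_minimum_depth(false, Some(3)), Some(3));  // non-coinbase untouched
}
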
                                        }
                                        // If we allow 1-conf funding, we may need to check for channel_ready here and
                                        // send it immediately instead of waiting for a best_block_updated call (which
@@ -4757,7 +4875,7 @@ impl<SP: Deref> Channel<SP> where
                                        if let Some(channel_ready) = self.check_get_channel_ready(height) {
                                                log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                                                let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
-                                               return Ok((Some(channel_ready), announcement_sigs));
+                                               msgs = (Some(channel_ready), announcement_sigs);
                                        }
                                }
                                for inp in tx.input.iter() {
@@ -4768,7 +4886,7 @@ impl<SP: Deref> Channel<SP> where
                                }
                        }
                }
-               Ok((None, None))
+               Ok(msgs)
        }
 
        /// When a new block is connected, we check the height of the block against outbound holding
@@ -4829,7 +4947,7 @@ impl<SP: Deref> Channel<SP> where
                }
 
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-               if non_shutdown_state >= ChannelState::ChannelReady as u32 ||
+               if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
                   (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
                        let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
                        if self.context.funding_tx_confirmation_height == 0 {
@@ -4857,7 +4975,7 @@ impl<SP: Deref> Channel<SP> where
                                height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
                        log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
                        // If funding_tx_confirmed_in is unset, the channel must not be active
-                       assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
+                       assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
                        assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
                        return Err(ClosureReason::FundingTimedOut);
                }
@@ -5431,17 +5549,20 @@ impl<SP: Deref> Channel<SP> where
                }
        }
 
-       pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
-               if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
-               }
-               self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
+       /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
+       /// happened.
+       pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
+               let new_forwarding_info = Some(CounterpartyForwardingInfo {
                        fee_base_msat: msg.contents.fee_base_msat,
                        fee_proportional_millionths: msg.contents.fee_proportional_millionths,
                        cltv_expiry_delta: msg.contents.cltv_expiry_delta
                });
+               let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
+               if did_change {
+                       self.context.counterparty_forwarding_info = new_forwarding_info;
+               }
 
-               Ok(())
+               Ok(did_change)
        }
 
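A hedged sketch of how the new boolean might be consumed; the handler shape and field set here are illustrative assumptions, but the idea is that repeated, identical channel_update messages no longer look like a change worth acting on:

// Hypothetical caller shape; the real handling lives in ChannelManager.
struct ForwardingInfo { fee_base_msat: u32 }
struct Chan { info: Option<ForwardingInfo> }
impl Chan {
    // Mirrors Channel::channel_update: apply the update and report whether it changed anything.
    fn channel_update(&mut self, fee_base_msat: u32) -> bool {
        let new_info = Some(ForwardingInfo { fee_base_msat });
        let did_change = match (&self.info, &new_info) {
            (Some(a), Some(b)) => a.fee_base_msat != b.fee_base_msat,
            _ => true,
        };
        if did_change { self.info = new_info; }
        did_change
    }
}

fn main() {
    let mut chan = Chan { info: None };
    assert!(chan.channel_update(1_000));   // first update: something changed
    assert!(!chan.channel_update(1_000));  // identical update: nothing to act on
}
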
        /// Begins the shutdown process, getting a message for the remote peer and returning all
@@ -5477,7 +5598,7 @@ impl<SP: Deref> Channel<SP> where
                // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
                // script is set, we just force-close and call it a day.
                let mut chan_closed = false;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        chan_closed = true;
                }
 
@@ -5506,7 +5627,7 @@ impl<SP: Deref> Channel<SP> where
 
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        self.context.channel_state = ChannelState::ShutdownComplete as u32;
                } else {
                        self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
@@ -5729,6 +5850,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                        channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
+                               is_batch_funding: None,
 
                                counterparty_cur_commitment_point: None,
                                counterparty_prev_commitment_point: None,
@@ -5789,7 +5911,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
+       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
        -> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
@@ -5821,7 +5943,17 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                self.context.channel_state = ChannelState::FundingCreated as u32;
                self.context.channel_id = funding_txo.to_channel_id();
+
+               // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
+               // We can skip this if it is a zero-conf channel.
+               if funding_transaction.is_coin_base() &&
+                       self.context.minimum_depth.unwrap_or(0) > 0 &&
+                       self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+                       self.context.minimum_depth = Some(COINBASE_MATURITY);
+               }
+
                self.context.funding_transaction = Some(funding_transaction);
+               self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
 
                let channel = Channel {
                        context: self.context,
@@ -6371,6 +6503,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                        channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
+                               is_batch_funding: None,
 
                                counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
                                counterparty_prev_commitment_point: None,
@@ -6986,6 +7119,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (31, channel_pending_event_emitted, option),
                        (35, pending_outbound_skimmed_fees, optional_vec),
                        (37, holding_cell_skimmed_fees, optional_vec),
+                       (38, self.context.is_batch_funding, option),
                });
 
                Ok(())
@@ -7208,7 +7342,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                };
 
                let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
-               let funding_transaction = Readable::read(reader)?;
+               let funding_transaction: Option<Transaction> = Readable::read(reader)?;
 
                let counterparty_cur_commitment_point = Readable::read(reader)?;
 
@@ -7269,6 +7403,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
                let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
 
+               let mut is_batch_funding: Option<()> = None;
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -7294,6 +7430,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (31, channel_pending_event_emitted, option),
                        (35, pending_outbound_skimmed_fees_opt, optional_vec),
                        (37, holding_cell_skimmed_fees_opt, optional_vec),
+                       (38, is_batch_funding, option),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7301,7 +7438,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        // If we've gotten to the funding stage of the channel, populate the signer with its
                        // required channel parameters.
                        let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
-                       if non_shutdown_state >= (ChannelState::FundingCreated as u32) {
+                       if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
                                holder_signer.provide_channel_parameters(&channel_parameters);
                        }
                        (channel_keys_id, holder_signer)
@@ -7451,6 +7588,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                                channel_transaction_parameters: channel_parameters,
                                funding_transaction,
+                               is_batch_funding,
 
                                counterparty_cur_commitment_point,
                                counterparty_prev_commitment_point,
@@ -7504,7 +7642,7 @@ mod tests {
        use crate::ln::PaymentHash;
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::ln::channel::InitFeatures;
-       use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
+       use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use crate::ln::features::ChannelTypeFeatures;
        use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
@@ -7683,7 +7821,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7810,7 +7948,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7818,7 +7956,7 @@ mod tests {
 
                // Now disconnect the two nodes and check that the commitment point in
                // Node B's channel_reestablish message is sane.
-               node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+               assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
                let msg = node_b_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
@@ -7826,7 +7964,7 @@ mod tests {
 
                // Check that the commitment point in Node A's channel_reestablish message
                // is sane.
-               node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+               assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
                let msg = node_a_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
@@ -7998,7 +8136,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -8020,7 +8158,7 @@ mod tests {
                        },
                        signature: Signature::from(unsafe { FFISignature::new() })
                };
-               node_a_chan.channel_update(&update).unwrap();
+               assert!(node_a_chan.channel_update(&update).unwrap());
 
                // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
                // change our official htlc_minimum_msat.
@@ -8033,6 +8171,8 @@ mod tests {
                        },
                        None => panic!("expected counterparty forwarding info to be Some")
                }
+
+               assert!(!node_a_chan.channel_update(&update).unwrap());
        }
 
        #[cfg(feature = "_test_vectors")]
@@ -8978,4 +9118,146 @@ mod tests {
                );
                assert!(res.is_err());
        }
+
+       #[test]
+       fn test_waiting_for_batch() {
+               let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+               let logger = test_utils::TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let best_block = BestBlock::from_network(network);
+               let chain_hash = genesis_block(network).header.block_hash();
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+               let mut config = UserConfig::default();
+               // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
+               // channel in a batch before all channels are ready.
+               config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+               // Create a channel from node a to node b that will be part of batch funding.
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest,
+                       &&keys_provider,
+                       &&keys_provider,
+                       node_b_node_id,
+                       &channelmanager::provided_init_features(&config),
+                       10000000,
+                       100000,
+                       42,
+                       &config,
+                       0,
+                       42,
+               ).unwrap();
+
+               let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+               let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest,
+                       &&keys_provider,
+                       &&keys_provider,
+                       node_b_node_id,
+                       &channelmanager::provided_channel_type_features(&config),
+                       &channelmanager::provided_init_features(&config),
+                       &open_channel_msg,
+                       7,
+                       &config,
+                       0,
+                       &&logger,
+                       true,  // Allow node b to send a 0conf channel_ready.
+               ).unwrap();
+
+               let accept_channel_msg = node_b_chan.accept_inbound_channel();
+               node_a_chan.accept_channel(
+                       &accept_channel_msg,
+                       &config.channel_handshake_limits,
+                       &channelmanager::provided_init_features(&config),
+               ).unwrap();
+
+               // Fund the channel with a batch funding transaction.
+               let output_script = node_a_chan.context.get_funding_redeemscript();
+               let tx = Transaction {
+                       version: 1,
+                       lock_time: PackedLockTime::ZERO,
+                       input: Vec::new(),
+                       output: vec![
+                               TxOut {
+                                       value: 10000000, script_pubkey: output_script.clone(),
+                               },
+                               TxOut {
+                                       value: 10000000, script_pubkey: Builder::new().into_script(),
+                               },
+                       ]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
+                       tx.clone(),
+                       funding_outpoint,
+                       true,
+                       &&logger,
+               ).map_err(|_| ()).unwrap();
+               let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
+                       &funding_created_msg,
+                       best_block,
+                       &&keys_provider,
+                       &&logger,
+               ).map_err(|_| ()).unwrap();
+               let node_b_updates = node_b_chan.monitor_updating_restored(
+                       &&logger,
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       0,
+               );
+
+               // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
+               // broadcasting the funding transaction until the batch is ready.
+               let _ = node_a_chan.funding_signed(
+                       &funding_signed_msg,
+                       best_block,
+                       &&keys_provider,
+                       &&logger,
+               ).unwrap();
+               let node_a_updates = node_a_chan.monitor_updating_restored(
+                       &&logger,
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       0,
+               );
+               // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
+               // as the funding transaction depends on all channels in the batch becoming ready.
+               assert!(node_a_updates.channel_ready.is_none());
+               assert!(node_a_updates.funding_broadcastable.is_none());
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::WaitingForBatch as u32,
+               );
+
+               // It is possible to receive a 0conf channel_ready from the remote node.
+               node_a_chan.channel_ready(
+                       &node_b_updates.channel_ready.unwrap(),
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       &best_block,
+                       &&logger,
+               ).unwrap();
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::WaitingForBatch as u32 |
+                       ChannelState::TheirChannelReady as u32,
+               );
+
+               // ChannelState::WaitingForBatch is only cleared once the ChannelManager calls set_batch_ready.
+               node_a_chan.set_batch_ready();
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::TheirChannelReady as u32,
+               );
+               assert!(node_a_chan.check_get_channel_ready(0).is_some());
+       }
 }