Assert we never write channels in pre-funded/shutdown state
[rust-lightning] / lightning / src / ln / channel.rs
index a1af5e6da6854e30bc56b7b23e041114d9678a1b..5edc661144eb08353d39ed1be5bddbb5facfe38e 100644
@@ -419,6 +419,8 @@ define_state_flags!(
        ]
 );
 
+// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
+// into account when introducing new states and update `test_channel_state_order` accordingly.
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
 enum ChannelState {
        /// We are negotiating the parameters required for the channel prior to funding it.
@@ -523,14 +525,14 @@ impl ChannelState {
                }
        }
 
-       fn should_force_holding_cell(&self) -> bool {
+       fn can_generate_new_commitment(&self) -> bool {
                match self {
                        ChannelState::ChannelReady(flags) =>
-                               flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
-                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
-                                       flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+                               !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
+                                       !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
+                                       !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
                        _ => {
-                               debug_assert!(false, "The holding cell is only valid within ChannelReady");
+                               debug_assert!(false, "Can only generate new commitment within ChannelReady");
                                false
                        },
                }
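
The rename flips the predicate's polarity: call sites that previously asked "should we force the holding cell?" now ask "can we generate a new commitment?". A minimal sketch of the equivalence via De Morgan's law (booleans illustrative, not from the patch):

    fn demo_predicate_inversion() {
        let (awaiting_raa, monitor_in_progress, peer_disconnected) = (false, true, false);
        let should_force_holding_cell = awaiting_raa || monitor_in_progress || peer_disconnected;
        let can_generate_new_commitment = !awaiting_raa && !monitor_in_progress && !peer_disconnected;
        assert_eq!(should_force_holding_cell, !can_generate_new_commitment);
    }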
@@ -732,8 +734,8 @@ struct CommitmentStats<'a> {
        total_fee_sat: u64, // the total fee included in the transaction
        num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
        htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
-       local_balance_msat: u64, // local balance before fees but considering dust limits
-       remote_balance_msat: u64, // remote balance before fees but considering dust limits
+       local_balance_msat: u64, // local balance before fees *not* considering dust limits
+       remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
        outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
        inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
 }
@@ -814,6 +816,7 @@ pub(super) struct ReestablishResponses {
 /// The result of a shutdown that should be handled.
 #[must_use]
 pub(crate) struct ShutdownResult {
+       pub(crate) closure_reason: ClosureReason,
        /// A channel monitor update to apply.
        pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
        /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
@@ -822,7 +825,10 @@ pub(crate) struct ShutdownResult {
        /// propagated to the remainder of the batch.
        pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
        pub(crate) channel_id: ChannelId,
+       pub(crate) user_channel_id: u128,
+       pub(crate) channel_capacity_satoshis: u64,
        pub(crate) counterparty_node_id: PublicKey,
+       pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
 }
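
The new fields let a caller build closure notifications directly from the result rather than re-querying a channel that has already been torn down. A hypothetical consumer (function and event names illustrative, not the actual ChannelManager code):

    fn handle_shutdown_result(res: ShutdownResult) {
        // The closure reason now travels with the result.
        let _reason = res.closure_reason;
        // Enough identifying data to emit a ChannelClosed-style event.
        let _ = (res.channel_id, res.user_channel_id, res.channel_capacity_satoshis);
        // A funding transaction that was never broadcast can be surfaced to the user,
        // e.g. to generate a DiscardFunding-style event.
        if let Some(_funding_tx) = res.unbroadcasted_funding_tx { /* notify the user */ }
    }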
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -1728,13 +1734,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        }
                }
 
-               let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
+               let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
                assert!(value_to_self_msat >= 0);
                // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
                // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
                // "violate" their reserve value by couting those against it. Thus, we have to convert
                // everything to i64 before subtracting as otherwise we can overflow.
-               let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
+               let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
                assert!(value_to_remote_msat >= 0);
 
                #[cfg(debug_assertions)]
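
The overflow comment above deserves a worked example: even when the final balance is non-negative, evaluating the subtraction left-to-right in u64 can underflow partway through, which is why everything is converted to i64 first. A standalone illustration (numbers invented):

    fn demo_i64_conversion() {
        let channel_value_msat: u64 = 100_000;
        let value_to_self_msat: u64 = 90_000;
        let remote_htlc_total_msat: u64 = 15_000; // counterparty transiently "violates" reserve
        let offset_msat: i64 = -10_000;           // pending fulfills shifting value back to them
        // In u64, 100_000 - 90_000 - 15_000 underflows (panic in debug, wrap in release)...
        let value_to_remote_msat: i64 = channel_value_msat as i64
            - value_to_self_msat as i64 - remote_htlc_total_msat as i64 - offset_msat;
        assert!(value_to_remote_msat >= 0); // ...while in i64 the result, 5_000, is fine.
    }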
@@ -1800,10 +1806,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
                htlcs_included.append(&mut included_dust_htlcs);
 
-               // For the stats, trimmed-to-0 the value in msats accordingly
-               value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
-               value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
-
                CommitmentStats {
                        tx,
                        feerate_per_kw,
@@ -1837,8 +1839,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// will sign and send to our counterparty.
        /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
        fn build_remote_transaction_keys(&self) -> TxCreationKeys {
-               //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
-               //may see payments to it!
                let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
                let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
                let counterparty_pubkeys = self.get_counterparty_pubkeys();
@@ -1876,7 +1876,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                if let Some(feerate) = outbound_feerate_update {
                        feerate_per_kw = cmp::max(feerate_per_kw, feerate);
                }
-               cmp::max(2530, feerate_per_kw * 1250 / 1000)
+               let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
+               cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
        }
 
        /// Get forwarding information for the counterparty.
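
The `checked_mul` guards the 25% feerate bump against u32 overflow, saturating rather than wrapping at absurd feerates. A standalone sketch of the resulting behavior (function name illustrative):

    fn bounded_feerate(feerate_per_kw: u32) -> u32 {
        let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
        core::cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::MAX))
    }

    fn demo_bounded_feerate() {
        assert_eq!(bounded_feerate(1_000), 2_530);       // the floor dominates low rates
        assert_eq!(bounded_feerate(10_000), 12_500);     // 10_000 * 1.25
        assert_eq!(bounded_feerate(u32::MAX), u32::MAX); // would overflow: saturate
    }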
@@ -2316,15 +2317,17 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                res
        }
 
-       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
-               where F: Fn() -> Option<O> {
+       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
                match self.channel_state {
                        ChannelState::FundingNegotiated => f(),
-                       ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
-                               f()
-                       } else {
-                               None
-                       },
+                       ChannelState::AwaitingChannelReady(flags) =>
+                               if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
+                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
+                               {
+                                       f()
+                               } else {
+                                       None
+                               },
                        _ => None,
                }
        }
@@ -2359,7 +2362,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
        /// Also returns the list of payment_hashes for channels which we can safely fail backwards
        /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+       pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
                // Note that we MUST only generate a monitor update that indicates force-closure - we're
                // called during initialization prior to the chain_monitor in the encompassing ChannelManager
                // being fully configured in some cases. Thus, it's likely any monitor events we generate will
@@ -2386,28 +2389,30 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        // funding transaction, don't return a funding txo (which prevents providing the
                        // monitor update to the user, even if we return one).
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-                       let generate_monitor_update = match self.channel_state {
-                               ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
-                               _ => false,
-                       };
-                       if generate_monitor_update {
+                       if !self.channel_state.is_pre_funded_state() {
                                self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
                                Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
+                                       counterparty_node_id: Some(self.counterparty_node_id),
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
                                }))
                        } else { None }
                } else { None };
                let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
+               let unbroadcasted_funding_tx = self.unbroadcasted_funding();
 
                self.channel_state = ChannelState::ShutdownComplete;
                self.update_time_counter += 1;
                ShutdownResult {
+                       closure_reason,
                        monitor_update,
                        dropped_outbound_htlcs,
                        unbroadcasted_batch_funding_txid,
                        channel_id: self.channel_id,
+                       user_channel_id: self.user_id,
+                       channel_capacity_satoshis: self.channel_value_satoshis,
                        counterparty_node_id: self.counterparty_node_id,
+                       unbroadcasted_funding_tx,
                }
        }
 
@@ -2434,8 +2439,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                        .ok();
 
                                if funding_signed.is_none() {
-                                       log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
-                                       self.signer_pending_funding = true;
+                                       #[cfg(not(async_signing))] {
+                                               panic!("Failed to get signature for funding_signed");
+                                       }
+                                       #[cfg(async_signing)] {
+                                               log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+                                               self.signer_pending_funding = true;
+                                       }
                                } else if self.signer_pending_funding {
                                        log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
                                        self.signer_pending_funding = false;
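
This cfg-gated shape recurs below for commitment updates and funding_created: exactly one arm compiles, so builds without the `async_signing` cfg fail fast where async-signing builds record pending state and retry later. The pattern, reduced to a sketch (helper name illustrative):

    fn require_signature<T>(sig: Option<T>) -> Option<T> {
        if sig.is_none() {
            #[cfg(not(async_signing))]
            panic!("signer must produce signatures synchronously");
        }
        // With async_signing, None flows through and is retried once the signer
        // reports readiness (see signer_maybe_unblocked).
        sig
    }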
@@ -2523,6 +2533,62 @@ struct CommitmentTxInfoCached {
        feerate: u32,
 }
 
+/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
+/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
+trait FailHTLCContents {
+       type Message: FailHTLCMessageName;
+       fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
+       fn to_inbound_htlc_state(self) -> InboundHTLCState;
+       fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
+}
+impl FailHTLCContents for msgs::OnionErrorPacket {
+       type Message = msgs::UpdateFailHTLC;
+       fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+               msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
+       }
+       fn to_inbound_htlc_state(self) -> InboundHTLCState {
+               InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
+       }
+       fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+               HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
+       }
+}
+impl FailHTLCContents for ([u8; 32], u16) {
+       type Message = msgs::UpdateFailMalformedHTLC;
+       fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+               msgs::UpdateFailMalformedHTLC {
+                       htlc_id,
+                       channel_id,
+                       sha256_of_onion: self.0,
+                       failure_code: self.1
+               }
+       }
+       fn to_inbound_htlc_state(self) -> InboundHTLCState {
+               InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
+       }
+       fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+               HTLCUpdateAwaitingACK::FailMalformedHTLC {
+                       htlc_id,
+                       sha256_of_onion: self.0,
+                       failure_code: self.1
+               }
+       }
+}
+
+trait FailHTLCMessageName {
+       fn name() -> &'static str;
+}
+impl FailHTLCMessageName for msgs::UpdateFailHTLC {
+       fn name() -> &'static str {
+               "update_fail_htlc"
+       }
+}
+impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
+       fn name() -> &'static str {
+               "update_fail_malformed_htlc"
+       }
+}
+
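
With both impls in place, the updated `fail_htlc` below is generic over the failure contents: an `msgs::OnionErrorPacket` produces an `update_fail_htlc`, while a `(sha256_of_onion, failure_code)` tuple produces an `update_fail_malformed_htlc`. A sketch of the dispatch (helper name illustrative):

    fn build_fail_message<E: FailHTLCContents>(
        contents: E, htlc_id: u64, channel_id: ChannelId,
    ) -> E::Message {
        // Monomorphizes to msgs::UpdateFailHTLC or msgs::UpdateFailMalformedHTLC.
        contents.to_message(htlc_id, channel_id)
    }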
 impl<SP: Deref> Channel<SP> where
        SP::Target: SignerProvider,
        <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
@@ -2639,7 +2705,7 @@ impl<SP: Deref> Channel<SP> where
        where L::Target: Logger {
                // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
                // (see equivalent if condition there).
-               assert!(self.context.channel_state.should_force_holding_cell());
+               assert!(!self.context.channel_state.can_generate_new_commitment());
                let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
                let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
                self.context.latest_monitor_update_id = mon_update_id;
@@ -2703,12 +2769,13 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage: payment_preimage_arg.clone(),
                        }],
                };
 
-               if self.context.channel_state.should_force_holding_cell() {
+               if !self.context.channel_state.can_generate_new_commitment() {
                        // Note that this condition is the same as the assertion in
                        // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
                        // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
@@ -2823,6 +2890,17 @@ impl<SP: Deref> Channel<SP> where
                        .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
        }
 
+       /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
+       /// want to fail blinded HTLCs where we are not the intro node.
+       ///
+       /// See [`Self::queue_fail_htlc`] for more info.
+       pub fn queue_fail_malformed_htlc<L: Deref>(
+               &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
+       ) -> Result<(), ChannelError> where L::Target: Logger {
+               self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
+                       .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+       }
+
        /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
        /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
        /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
@@ -2831,8 +2909,10 @@ impl<SP: Deref> Channel<SP> where
        /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
        /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
        /// [`ChannelError::Ignore`].
-       fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
-       -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+       fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
+               &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
+               logger: &L
+       ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        panic!("Was asked to fail an HTLC when channel was not in an operational state");
                }
@@ -2869,7 +2949,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(None);
                }
 
-               if self.context.channel_state.should_force_holding_cell() {
+               if !self.context.channel_state.can_generate_new_commitment() {
                        debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
                        force_holding_cell = true;
                }
@@ -2897,24 +2977,18 @@ impl<SP: Deref> Channel<SP> where
                                }
                        }
                        log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
-                       self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
-                               htlc_id: htlc_id_arg,
-                               err_packet,
-                       });
+                       self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
                        return Ok(None);
                }
 
-               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
+               log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
+                       E::Message::name(), &self.context.channel_id());
                {
                        let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
-                       htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+                       htlc.state = err_contents.clone().to_inbound_htlc_state();
                }
 
-               Ok(Some(msgs::UpdateFailHTLC {
-                       channel_id: self.context.channel_id(),
-                       htlc_id: htlc_id_arg,
-                       reason: err_packet
-               }))
+               Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
        }
 
        // Message handlers:
@@ -2927,6 +3001,20 @@ impl<SP: Deref> Channel<SP> where
                self.context.channel_state.clear_waiting_for_batch();
        }
 
+       /// Unsets the existing funding information.
+       ///
+       /// This must only be used if the channel has not yet completed funding and has not been used.
+       ///
+       /// Further, the channel must be immediately shut down after this with a call to
+       /// [`ChannelContext::force_shutdown`].
+       pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
+               debug_assert!(matches!(
+                       self.context.channel_state, ChannelState::AwaitingChannelReady(_)
+               ));
+               self.context.channel_transaction_parameters.funding_outpoint = None;
+               self.context.channel_id = temporary_channel_id;
+       }
+
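
Per the docs above, this is only sound when immediately followed by a forced shutdown. A hypothetical call sequence (caller context illustrative, not from the patch):

    // chan.unset_funding_info(temporary_channel_id);
    // let shutdown_res = chan.context.force_shutdown(
    //     false, ClosureReason::ProcessingError { err: "funding failed".to_owned() },
    // );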
        /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
        /// and the channel is now usable (and public), this may generate an announcement_signatures to
        /// reply with.
@@ -3417,6 +3505,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let mut monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
                                commitment_tx: holder_commitment_tx,
                                htlc_outputs: htlcs_and_sigs,
@@ -3477,7 +3566,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
@@ -3496,6 +3585,7 @@ impl<SP: Deref> Channel<SP> where
 
                        let mut monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+                               counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: Vec::new(),
                        };
 
@@ -3511,7 +3601,7 @@ impl<SP: Deref> Channel<SP> where
                                // the limit. In case it's less rare than I anticipate, we may want to revisit
                                // handling this case better and maybe fulfilling some of the HTLCs while attempting
                                // to rebalance channels.
-                               match &htlc_update {
+                               let fail_htlc_res = match &htlc_update {
                                        &HTLCUpdateAwaitingACK::AddHTLC {
                                                amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
                                                skimmed_fee_msat, blinding_point, ..
@@ -3539,6 +3629,7 @@ impl<SP: Deref> Channel<SP> where
                                                                }
                                                        }
                                                }
+                                               None
                                        },
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
                                                // If an HTLC claim was previously added to the holding cell (via
@@ -3552,29 +3643,33 @@ impl<SP: Deref> Channel<SP> where
                                                        { monitor_update } else { unreachable!() };
                                                update_fulfill_count += 1;
                                                monitor_update.updates.append(&mut additional_monitor_update.updates);
+                                               None
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
-                                               match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
-                                                       Ok(update_fail_msg_option) => {
-                                                               // If an HTLC failure was previously added to the holding cell (via
-                                                               // `queue_fail_htlc`) then generating the fail message itself must
-                                                               // not fail - we should never end up in a state where we double-fail
-                                                               // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
-                                                               // for a full revocation before failing.
-                                                               debug_assert!(update_fail_msg_option.is_some());
-                                                               update_fail_count += 1;
-                                                       },
-                                                       Err(e) => {
-                                                               if let ChannelError::Ignore(_) = e {}
-                                                               else {
-                                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
-                                                               }
-                                                       }
-                                               }
-                                       },
-                                       &HTLCUpdateAwaitingACK::FailMalformedHTLC { .. } => {
-                                               todo!()
+                                               Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
+                                                .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
                                        },
+                                       &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+                                               Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
+                                                .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
+                                       }
+                               };
+                               if let Some(res) = fail_htlc_res {
+                                       match res {
+                                               Ok(fail_msg_opt) => {
+                                                       // If an HTLC failure was previously added to the holding cell (via
+                                                       // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
+                                                       // not fail - we should never end up in a state where we double-fail
+                                                       // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+                                                       // for a full revocation before failing.
+                                                       debug_assert!(fail_msg_opt.is_some());
+                                                       update_fail_count += 1;
+                                               },
+                                               Err(ChannelError::Ignore(_)) => {},
+                                               Err(_) => {
+                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+                                               },
+                                       }
                                }
                        }
                        if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
@@ -3665,6 +3760,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let mut monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
                                idx: self.context.cur_counterparty_commitment_transaction_number + 1,
                                secret: msg.per_commitment_secret,
@@ -4080,8 +4176,8 @@ impl<SP: Deref> Channel<SP> where
                // first received the funding_signed.
                let mut funding_broadcastable =
                        if self.context.is_outbound() &&
-                               matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
-                               matches!(self.context.channel_state, ChannelState::ChannelReady(_))
+                               (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+                               matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
                        {
                                self.context.funding_transaction.take()
                        } else { None };
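
The added parentheses fix an operator-precedence bug: `&&` binds tighter than `||`, so the old condition grouped as `(outbound && awaiting_ready) || channel_ready` and could take the funding transaction for a non-outbound channel. A worked example:

    fn demo_precedence() {
        let (outbound, awaiting_ready, channel_ready) = (false, false, true);
        let old = outbound && awaiting_ready || channel_ready;   // (a && b) || c
        let new = outbound && (awaiting_ready || channel_ready); // intended grouping
        assert!(old && !new); // inbound ChannelReady: old matched, new does not
    }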
@@ -4183,7 +4279,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Indicates that the signer may have some signatures for us, so we should retry if we're
        /// blocked.
-       #[allow(unused)]
+       #[cfg(async_signing)]
        pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
                let commitment_update = if self.context.signer_pending_commitment_update {
                        self.get_last_commitment_update_for_send(logger).ok()
@@ -4287,11 +4383,16 @@ impl<SP: Deref> Channel<SP> where
                        }
                        update
                } else {
-                       if !self.context.signer_pending_commitment_update {
-                               log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
-                               self.context.signer_pending_commitment_update = true;
+                       #[cfg(not(async_signing))] {
+                               panic!("Failed to get signature for new commitment state");
+                       }
+                       #[cfg(async_signing)] {
+                               if !self.context.signer_pending_commitment_update {
+                                       log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+                                       self.context.signer_pending_commitment_update = true;
+                               }
+                               return Err(());
                        }
-                       return Err(());
                };
                Ok(msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
@@ -4717,6 +4818,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.latest_monitor_update_id += 1;
                        let monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id,
+                               counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
@@ -4830,11 +4932,15 @@ impl<SP: Deref> Channel<SP> where
                if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
                                let shutdown_result = ShutdownResult {
+                                       closure_reason: ClosureReason::CooperativeClosure,
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                                        channel_id: self.context.channel_id,
+                                       user_channel_id: self.context.user_id,
+                                       channel_capacity_satoshis: self.context.channel_value_satoshis,
                                        counterparty_node_id: self.context.counterparty_node_id,
+                                       unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
                                };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
                                self.context.channel_state = ChannelState::ShutdownComplete;
@@ -4860,11 +4966,15 @@ impl<SP: Deref> Channel<SP> where
                                                        .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
+                                                               closure_reason: ClosureReason::CooperativeClosure,
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                                                                channel_id: self.context.channel_id,
+                                                               user_channel_id: self.context.user_id,
+                                                               channel_capacity_satoshis: self.context.channel_value_satoshis,
                                                                counterparty_node_id: self.context.counterparty_node_id,
+                                                               unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
                                                        };
                                                        self.context.channel_state = ChannelState::ShutdownComplete;
                                                        self.context.update_time_counter += 1;
@@ -5395,7 +5505,7 @@ impl<SP: Deref> Channel<SP> where
                        // larger. If we don't know that time has moved forward, we can just set it to the last
                        // time we saw and it will be ignored.
                        let best_time = self.context.update_time_counter;
-                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
+                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
                                Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
                                        assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
@@ -5743,7 +5853,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
 
-               let need_holding_cell = self.context.channel_state.should_force_holding_cell();
+               let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
                log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
                        payment_hash, amount_msat,
                        if force_holding_cell { "into holding cell" }
@@ -5840,6 +5950,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
                                commitment_txid: counterparty_commitment_txid,
                                htlc_outputs: htlcs.clone(),
@@ -6038,6 +6149,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.latest_monitor_update_id += 1;
                        let monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id,
+                               counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
@@ -6372,9 +6484,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                let funding_created = self.get_funding_created_msg(logger);
                if funding_created.is_none() {
-                       if !self.context.signer_pending_funding {
-                               log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
-                               self.context.signer_pending_funding = true;
+                       #[cfg(not(async_signing))] {
+                               panic!("Failed to get signature for new funding creation");
+                       }
+                       #[cfg(async_signing)] {
+                               if !self.context.signer_pending_funding {
+                                       log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+                                       self.context.signer_pending_funding = true;
+                               }
                        }
                }
 
@@ -6720,7 +6837,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
        /// Indicates that the signer may have some signatures for us, so we should retry if we're
        /// blocked.
-       #[allow(unused)]
+       #[cfg(async_signing)]
        pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
                if self.context.signer_pending_funding && self.context.is_outbound() {
                        log_trace!(logger, "Signer unblocked a funding_created");
@@ -6735,6 +6852,41 @@ pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
        pub unfunded_context: UnfundedChannelContext,
 }
 
+/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
+/// [`msgs::OpenChannel`].
+pub(super) fn channel_type_from_open_channel(
+       msg: &msgs::OpenChannel, their_features: &InitFeatures,
+       our_supported_features: &ChannelTypeFeatures
+) -> Result<ChannelTypeFeatures, ChannelError> {
+       if let Some(channel_type) = &msg.channel_type {
+               if channel_type.supports_any_optional_bits() {
+                       return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+               }
+
+               // We only support the channel types defined by the `ChannelManager` in
+               // `provided_channel_type_features`. The channel type must always support
+               // `static_remote_key`.
+               if !channel_type.requires_static_remote_key() {
+                       return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+               }
+               // Make sure we support all of the features behind the channel type.
+               if !channel_type.is_subset(our_supported_features) {
+                       return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+               }
+               let announced_channel = (msg.channel_flags & 1) == 1;
+               if channel_type.requires_scid_privacy() && announced_channel {
+                       return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+               }
+               Ok(channel_type.clone())
+       } else {
+               let channel_type = ChannelTypeFeatures::from_init(&their_features);
+               if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+                       return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+               }
+               Ok(channel_type)
+       }
+}
+
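
Extracting the validation into a free function lets callers (e.g. the ChannelManager) check a peer's requested channel type before doing any channel setup work; the InboundV1Channel constructor below now simply delegates to it. A hypothetical use:

    fn peer_requests_supported_type(
        msg: &msgs::OpenChannel, their_features: &InitFeatures,
        our_supported_features: &ChannelTypeFeatures,
    ) -> bool {
        channel_type_from_open_channel(msg, their_features, our_supported_features).is_ok()
    }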
 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Creates a new channel from a remote side's request for one.
        /// Assumes chain_hash has already been checked and corresponds with what we expect!
@@ -6753,32 +6905,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // First check the channel type is known, failing before we do anything else if we don't
                // support this channel type.
-               let channel_type = if let Some(channel_type) = &msg.channel_type {
-                       if channel_type.supports_any_optional_bits() {
-                               return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
-                       }
-
-                       // We only support the channel types defined by the `ChannelManager` in
-                       // `provided_channel_type_features`. The channel type must always support
-                       // `static_remote_key`.
-                       if !channel_type.requires_static_remote_key() {
-                               return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
-                       }
-                       // Make sure we support all of the features behind the channel type.
-                       if !channel_type.is_subset(our_supported_features) {
-                               return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
-                       }
-                       if channel_type.requires_scid_privacy() && announced_channel {
-                               return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
-                       }
-                       channel_type.clone()
-               } else {
-                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
-                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
-                       }
-                       channel_type
-               };
+               let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
 
                let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
                let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
@@ -7344,6 +7471,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        let mut channel_state = self.context.channel_state;
                        if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
                                channel_state.set_peer_disconnected();
+                       } else {
+                               debug_assert!(false, "Pre-funded/shutdown channels should not be written");
                        }
                        channel_state.to_u32().write(writer)?;
                }
@@ -8212,6 +8341,7 @@ mod tests {
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
        use bitcoin::blockdata::opcodes;
        use bitcoin::network::constants::Network;
+       use crate::ln::onion_utils::INVALID_ONION_BLINDING;
        use crate::ln::{PaymentHash, PaymentPreimage};
        use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
@@ -8244,6 +8374,18 @@ mod tests {
        use bitcoin::address::{WitnessProgram, WitnessVersion};
        use crate::prelude::*;
 
+       #[test]
+       fn test_channel_state_order() {
+               use crate::ln::channel::NegotiatingFundingFlags;
+               use crate::ln::channel::AwaitingChannelReadyFlags;
+               use crate::ln::channel::ChannelReadyFlags;
+
+               assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
+               assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
+               assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
+               assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
+       }
+
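
The assertions rely on `#[derive(PartialOrd)]` comparing enum variants by declaration order, which is exactly what the note added above the `ChannelState` definition warns about. A standalone illustration:

    #[derive(PartialEq, PartialOrd)]
    enum Phase { Negotiating, Funded, Ready, Shutdown }

    #[test]
    fn declaration_order_defines_ordering() {
        assert!(Phase::Negotiating < Phase::Funded);
        assert!(Phase::Ready < Phase::Shutdown);
    }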
        struct TestFeeEstimator {
                fee_est: u32
        }
@@ -8748,19 +8890,37 @@ mod tests {
        }
 
        #[test]
-       fn blinding_point_skimmed_fee_ser() {
-               // Ensure that channel blinding points and skimmed fees are (de)serialized properly.
+       fn blinding_point_skimmed_fee_malformed_ser() {
+               // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
+               // properly.
+               let logger = test_utils::TestLogger::new();
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
+               let best_block = BestBlock::from_network(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
 
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let features = channelmanager::provided_init_features(&config);
-               let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
-               let mut chan = Channel { context: outbound_chan.context };
+               let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
+               ).unwrap();
+               let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
+                       &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
+               ).unwrap();
+               outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
+               let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
+               }]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+               let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
+               let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
+                       Ok((chan, _, _)) => chan,
+                       Err((_, e)) => panic!("{}", e),
+               };
 
                let dummy_htlc_source = HTLCSource::OutboundRoute {
                        path: Path {
@@ -8814,13 +8974,19 @@ mod tests {
                        payment_preimage: PaymentPreimage([42; 32]),
                        htlc_id: 0,
                };
-               let mut holding_cell_htlc_updates = Vec::with_capacity(10);
-               for i in 0..10 {
-                       if i % 3 == 0 {
+               let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
+                       htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
+               };
+               let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
+                       htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
+               };
+               let mut holding_cell_htlc_updates = Vec::with_capacity(12);
+               for i in 0..12 {
+                       if i % 5 == 0 {
                                holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
-                       } else if i % 3 == 1 {
+                       } else if i % 5 == 1 {
                                holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
-                       } else {
+                       } else if i % 5 == 2 {
                                let mut dummy_add = dummy_holding_cell_add_htlc.clone();
                                if let HTLCUpdateAwaitingACK::AddHTLC {
                                        ref mut blinding_point, ref mut skimmed_fee_msat, ..
@@ -8829,6 +8995,10 @@ mod tests {
                                        *skimmed_fee_msat = Some(42);
                                } else { panic!() }
                                holding_cell_htlc_updates.push(dummy_add);
+                       } else if i % 5 == 3 {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
+                       } else {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
                        }
                }
                chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
@@ -8843,7 +9013,7 @@ mod tests {
                assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
        }
 
-       #[cfg(feature = "_test_vectors")]
+       #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
        #[test]
        fn outbound_commitment_test() {
                use bitcoin::sighash;
@@ -8864,7 +9034,7 @@ mod tests {
 
                // Test vectors from BOLT 3 Appendices C and F (anchors):
                let feeest = TestFeeEstimator{fee_est: 15000};
-               let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+               let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
                let secp_ctx = Secp256k1::new();
 
                let mut signer = InMemorySigner::new(