Merge pull request #1505 from tnull/2022-05-support-0conf-channeltype
author valentinewallace <valentinewallace@users.noreply.github.com>
Thu, 2 Jun 2022 20:02:25 +0000 (13:02 -0700)
committer GitHub <noreply@github.com>
Thu, 2 Jun 2022 20:02:25 +0000 (13:02 -0700)
Support `ZeroConf` channel type.

1  2 
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/util/events.rs

index ca02f0a9ac9e302ae10047025097ff24c1fcc7cd,1e59894694dda9d5174835f6644a7269f075afed..4daaf630ac4e57bd7c96ed73e401badf986236d1
@@@ -243,7 -243,7 +243,7 @@@ enum HTLCUpdateAwaitingACK 
  
  /// There are a few "states" and then a number of flags which can be applied:
  /// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
 -/// TheirFundingLocked and OurFundingLocked then get set on FundingSent, and when both are set we
 +/// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
  /// move on to ChannelFunded.
  /// Note that PeerDisconnected can be set on both ChannelFunded and FundingSent.
  /// ChannelFunded can then get all remaining flags set on it, until we finish shutdown, then we
@@@ -258,15 -258,15 +258,15 @@@ enum ChannelState 
        /// upon receipt of funding_created, so simply skip this state.
        FundingCreated = 4,
        /// Set when we have received/sent funding_created and funding_signed and are thus now waiting
 -      /// on the funding transaction to confirm. The FundingLocked flags are set to indicate when we
 +      /// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
        /// and our counterparty consider the funding transaction confirmed.
        FundingSent = 8,
 -      /// Flag which can be set on FundingSent to indicate they sent us a funding_locked message.
 -      /// Once both TheirFundingLocked and OurFundingLocked are set, state moves on to ChannelFunded.
 -      TheirFundingLocked = 1 << 4,
 -      /// Flag which can be set on FundingSent to indicate we sent them a funding_locked message.
 -      /// Once both TheirFundingLocked and OurFundingLocked are set, state moves on to ChannelFunded.
 -      OurFundingLocked = 1 << 5,
 +      /// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
 +      /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
 +      TheirChannelReady = 1 << 4,
 +      /// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
 +      /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
 +      OurChannelReady = 1 << 5,
        ChannelFunded = 64,
        /// Flag which is set on ChannelFunded and FundingSent indicating remote side is considered
        /// "disconnected" and no updates are allowed until after we've done a channel_reestablish
@@@ -429,13 -429,13 +429,13 @@@ pub(super) struct MonitorRestoreUpdate
        pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        pub finalized_claimed_htlcs: Vec<HTLCSource>,
        pub funding_broadcastable: Option<Transaction>,
 -      pub funding_locked: Option<msgs::FundingLocked>,
 +      pub channel_ready: Option<msgs::ChannelReady>,
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
  }
  
  /// The return value of `channel_reestablish`
  pub(super) struct ReestablishResponses {
 -      pub funding_locked: Option<msgs::FundingLocked>,
 +      pub channel_ready: Option<msgs::ChannelReady>,
        pub raa: Option<msgs::RevokeAndACK>,
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub order: RAACommitmentOrder,
@@@ -543,7 -543,7 +543,7 @@@ pub(super) struct Channel<Signer: Sign
        /// send it first.
        resend_order: RAACommitmentOrder,
  
 -      monitor_pending_funding_locked: bool,
 +      monitor_pending_channel_ready: bool,
        monitor_pending_revoke_and_ack: bool,
        monitor_pending_commitment_signed: bool,
        monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
  
        /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
        /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
 -      /// funding_locked *before* sending the channel_reestablish (which is clearly a violation of
 -      /// the BOLT specs). We copy c-lightning's workaround here and simply store the funding_locked
 +      /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
 +      /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
        /// message until we receive a channel_reestablish.
        ///
        /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
 -      pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
 +      pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
  
        #[cfg(any(test, fuzzing))]
        // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
@@@ -960,7 -960,7 +960,7 @@@ impl<Signer: Sign> Channel<Signer> 
  
                        resend_order: RAACommitmentOrder::CommitmentFirst,
  
 -                      monitor_pending_funding_locked: false,
 +                      monitor_pending_channel_ready: false,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
                        monitor_pending_forwards: Vec::new(),
                        if channel_type.supports_any_optional_bits() {
                                return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
                        }
-                       // We currently only allow two channel types, so write it all out here - we allow
-                       // `only_static_remote_key` in all contexts, and further allow
-                       // `static_remote_key|scid_privacy` if the channel is not publicly announced.
-                       let mut allowed_type = ChannelTypeFeatures::only_static_remote_key();
-                       if *channel_type != allowed_type {
-                               allowed_type.set_scid_privacy_required();
-                               if *channel_type != allowed_type {
+                       if channel_type.requires_unknown_bits() {
+                               return Err(ChannelError::Close("Channel Type field contains unknown bits".to_owned()));
+                       }
+                       // We currently only allow four channel types, so write it all out here - we allow
+                       // `only_static_remote_key` or `static_remote_key | zero_conf` in all contexts, and
+                       // further allow `static_remote_key | scid_privacy` or
+                       // `static_remote_key | scid_privacy | zero_conf`, if the channel is not
+                       // publicly announced.
+                       if *channel_type != ChannelTypeFeatures::only_static_remote_key() {
+                               if !channel_type.requires_scid_privacy() && !channel_type.requires_zero_conf() {
                                        return Err(ChannelError::Close("Channel Type was not understood".to_owned()));
                                }
-                               if announced_channel {
+                               if channel_type.requires_scid_privacy() && announced_channel {
                                        return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
                                }
                        }
  
                        resend_order: RAACommitmentOrder::CommitmentFirst,
  
 -                      monitor_pending_funding_locked: false,
 +                      monitor_pending_channel_ready: false,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
                        monitor_pending_forwards: Vec::new(),
                make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
        }
  
 +      /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
 +      /// entirely.
 +      ///
 +      /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
 +      /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
 +      ///
 +      /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
 +      /// disconnected).
 +      pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
 +              (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
 +      where L::Target: Logger {
 +              // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
 +              // (see equivalent if condition there).
 +              assert!(self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) != 0);
 +              let mon_update_id = self.latest_monitor_update_id; // Forget the ChannelMonitor update
 +              let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
 +              self.latest_monitor_update_id = mon_update_id;
 +              if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
 +                      assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
 +              }
 +      }
 +
        fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
                // Either ChannelFunded got set (which means it won't be unset) or there is no way any
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
                };
  
                if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
 +                      // Note that this condition is the same as the assertion in
 +                      // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
 +                      // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
 +                      // do not get into this branch.
                        for pending_update in self.holding_cell_htlc_updates.iter() {
                                match pending_update {
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
                &self.get_counterparty_pubkeys().funding_pubkey
        }
  
 -      pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>, Option<msgs::FundingLocked>), ChannelError> where L::Target: Logger {
 +      pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
                if self.is_outbound() {
                        return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
                }
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature
 -              }, channel_monitor, self.check_get_funding_locked(0)))
 +              }, channel_monitor, self.check_get_channel_ready(0)))
        }
  
        /// Handles a funding_signed message from the remote end.
        /// If this call is successful, broadcast the funding transaction (and not before!)
 -      pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction, Option<msgs::FundingLocked>), ChannelError> where L::Target: Logger {
 +      pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
                if !self.is_outbound() {
                        return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
                }
  
                log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
  
 -              Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_funding_locked(0)))
 +              Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
        }
  
 -      /// Handles a funding_locked message from our peer. If we've already sent our funding_locked
 +      /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
        /// and the channel is now usable (and public), this may generate an announcement_signatures to
        /// reply with.
 -      pub fn funding_locked<L: Deref>(&mut self, msg: &msgs::FundingLocked, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
 +      pub fn channel_ready<L: Deref>(&mut self, msg: &msgs::ChannelReady, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        self.workaround_lnd_bug_4006 = Some(msg.clone());
 -                      return Err(ChannelError::Ignore("Peer sent funding_locked when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
 +                      return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
                }
  
                if let Some(scid_alias) = msg.short_channel_id_alias {
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
  
                if non_shutdown_state == ChannelState::FundingSent as u32 {
 -                      self.channel_state |= ChannelState::TheirFundingLocked as u32;
 -              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
 +                      self.channel_state |= ChannelState::TheirChannelReady as u32;
 +              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
                        self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
                        self.update_time_counter += 1;
                } else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 ||
 -                      // If we reconnected before sending our funding locked they may still resend theirs:
 -                      (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) ==
 -                                            (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32))
 +                      // If we reconnected before sending our `channel_ready` they may still resend theirs:
 +                      (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
 +                                            (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
                {
 -                      // They probably disconnected/reconnected and re-sent the funding_locked, which is
 +                      // They probably disconnected/reconnected and re-sent the channel_ready, which is
                        // required, or they're sending a fresh SCID alias.
                        let expected_point =
                                if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
                                        // the current one.
                                        self.counterparty_cur_commitment_point
                                } else {
 -                                      // If they have sent updated points, funding_locked is always supposed to match
 +                                      // If they have sent updated points, channel_ready is always supposed to match
                                        // their "first" point, which we re-derive here.
                                        Some(PublicKey::from_secret_key(&self.secp_ctx, &SecretKey::from_slice(
                                                        &self.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
                                                ).expect("We already advanced, so previous secret keys should have been validated already")))
                                };
                        if expected_point != Some(msg.next_per_commitment_point) {
 -                              return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point".to_owned()));
 +                              return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
                        }
                        return Ok(None);
                } else {
 -                      return Err(ChannelError::Close("Peer sent a funding_locked at a strange time".to_owned()));
 +                      return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
                }
  
                self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
                self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
  
 -              log_info!(logger, "Received funding_locked from peer for channel {}", log_bytes!(self.channel_id()));
 +              log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));
  
                Ok(self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger))
        }
        /// monitor update failure must *not* have been sent to the remote end, and must instead
        /// have been dropped. They will be regenerated when monitor_updating_restored is called.
        pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool,
 -              resend_funding_locked: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
 +              resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
                mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
                mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
        ) {
                self.monitor_pending_revoke_and_ack |= resend_raa;
                self.monitor_pending_commitment_signed |= resend_commitment;
 -              self.monitor_pending_funding_locked |= resend_funding_locked;
 +              self.monitor_pending_channel_ready |= resend_channel_ready;
                self.monitor_pending_forwards.append(&mut pending_forwards);
                self.monitor_pending_failures.append(&mut pending_fails);
                self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
  
                // We will never broadcast the funding transaction when we're in MonitorUpdateFailed (and
                // we assume the user never directly broadcasts the funding transaction and waits for us to
 -              // do it). Thus, we can only ever hit monitor_pending_funding_locked when we're
 +              // do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
                // * an inbound channel that failed to persist the monitor on funding_created and we got
                //   the funding transaction confirmed before the monitor was persisted, or
 -              // * a 0-conf channel and intended to send the funding_locked before any broadcast at all.
 -              let funding_locked = if self.monitor_pending_funding_locked {
 +              // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
 +              let channel_ready = if self.monitor_pending_channel_ready {
                        assert!(!self.is_outbound() || self.minimum_depth == Some(0),
                                "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
 -                      self.monitor_pending_funding_locked = false;
 +                      self.monitor_pending_channel_ready = false;
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
 -                      Some(msgs::FundingLocked {
 +                      Some(msgs::ChannelReady {
                                channel_id: self.channel_id(),
                                next_per_commitment_point,
                                short_channel_id_alias: Some(self.outbound_scid_alias),
                        self.monitor_pending_commitment_signed = false;
                        return MonitorRestoreUpdates {
                                raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
 -                              accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
 +                              accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
                        };
                }
  
                        if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                MonitorRestoreUpdates {
 -                      raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
 +                      raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
                }
        }
  
                let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger);
  
                if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
 -                      // If we're waiting on a monitor update, we shouldn't re-send any funding_locked's.
 -                      if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 ||
 +                      // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
 +                      if self.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
                                        self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                if msg.next_remote_commitment_number != 0 {
 -                                      return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet".to_owned()));
 +                                      return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
                                }
                                // Short circuit the whole handler as there is nothing we can resend them
                                return Ok(ReestablishResponses {
 -                                      funding_locked: None,
 +                                      channel_ready: None,
                                        raa: None, commitment_update: None, mon_update: None,
                                        order: RAACommitmentOrder::CommitmentFirst,
                                        holding_cell_failed_htlcs: Vec::new(),
                                });
                        }
  
 -                      // We have OurFundingLocked set!
 +                      // We have OurChannelReady set!
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
                        return Ok(ReestablishResponses {
 -                              funding_locked: Some(msgs::FundingLocked {
 +                              channel_ready: Some(msgs::ChannelReady {
                                        channel_id: self.channel_id(),
                                        next_per_commitment_point,
                                        short_channel_id_alias: Some(self.outbound_scid_alias),
  
                let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
                        // Remote isn't waiting on any RevokeAndACK from us!
 -                      // Note that if we need to repeat our FundingLocked we'll do that in the next if block.
 +                      // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
                        None
                } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_holder_commitment_transaction_number {
                        if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                // the corresponding revoke_and_ack back yet.
                let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 };
  
 -              let funding_locked = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
 -                      // We should never have to worry about MonitorUpdateFailed resending FundingLocked
 +              let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
 +                      // We should never have to worry about MonitorUpdateFailed resending ChannelReady
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
 -                      Some(msgs::FundingLocked {
 +                      Some(msgs::ChannelReady {
                                channel_id: self.channel_id(),
                                next_per_commitment_point,
                                short_channel_id_alias: Some(self.outbound_scid_alias),
                                                panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
                                        Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
                                                Ok(ReestablishResponses {
 -                                                      funding_locked, shutdown_msg, announcement_sigs,
 +                                                      channel_ready, shutdown_msg, announcement_sigs,
                                                        raa: required_revoke,
                                                        commitment_update: Some(commitment_update),
                                                        order: self.resend_order.clone(),
                                        },
                                        Ok((None, holding_cell_failed_htlcs)) => {
                                                Ok(ReestablishResponses {
 -                                                      funding_locked, shutdown_msg, announcement_sigs,
 +                                                      channel_ready, shutdown_msg, announcement_sigs,
                                                        raa: required_revoke,
                                                        commitment_update: None,
                                                        order: self.resend_order.clone(),
                                }
                        } else {
                                Ok(ReestablishResponses {
 -                                      funding_locked, shutdown_msg, announcement_sigs,
 +                                      channel_ready, shutdown_msg, announcement_sigs,
                                        raa: required_revoke,
                                        commitment_update: None,
                                        order: self.resend_order.clone(),
                        if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                self.monitor_pending_commitment_signed = true;
                                Ok(ReestablishResponses {
 -                                      funding_locked, shutdown_msg, announcement_sigs,
 +                                      channel_ready, shutdown_msg, announcement_sigs,
                                        commitment_update: None, raa: None, mon_update: None,
                                        order: self.resend_order.clone(),
                                        holding_cell_failed_htlcs: Vec::new(),
                                })
                        } else {
                                Ok(ReestablishResponses {
 -                                      funding_locked, shutdown_msg, announcement_sigs,
 +                                      channel_ready, shutdown_msg, announcement_sigs,
                                        raa: required_revoke,
                                        commitment_update: Some(self.get_last_commitment_update(logger)),
                                        order: self.resend_order.clone(),
                &self.channel_type
        }
  
 -      /// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
 +      /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
        /// is_usable() returns true).
        /// Allowed in any state (including after shutdown)
        pub fn get_short_channel_id(&self) -> Option<u64> {
        /// Allowed in any state (including after shutdown)
        pub fn is_usable(&self) -> bool {
                let mask = ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK;
 -              (self.channel_state & mask) == (ChannelState::ChannelFunded as u32) && !self.monitor_pending_funding_locked
 +              (self.channel_state & mask) == (ChannelState::ChannelFunded as u32) && !self.monitor_pending_channel_ready
        }
  
        /// Returns true if this channel is currently available for use. This is a superset of
                self.channel_state >= ChannelState::FundingSent as u32
        }
  
 -      /// Returns true if our funding_locked has been sent
 -      pub fn is_our_funding_locked(&self) -> bool {
 -              (self.channel_state & ChannelState::OurFundingLocked as u32) != 0 || self.channel_state >= ChannelState::ChannelFunded as u32
 +      /// Returns true if our channel_ready has been sent
 +      pub fn is_our_channel_ready(&self) -> bool {
 +              (self.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.channel_state >= ChannelState::ChannelFunded as u32
        }
  
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
                self.channel_update_status = status;
        }
  
 -      fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
 +      fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
                if self.funding_tx_confirmation_height == 0 && self.minimum_depth != Some(0) {
                        return None;
                }
  
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
                let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
 -                      self.channel_state |= ChannelState::OurFundingLocked as u32;
 +                      self.channel_state |= ChannelState::OurChannelReady as u32;
                        true
 -              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
 +              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
                        self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
                        self.update_time_counter += 1;
                        true
 -              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
 +              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else if self.channel_state < ChannelState::ChannelFunded as u32 {
                                if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
                                        let next_per_commitment_point =
                                                self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.secp_ctx);
 -                                      return Some(msgs::FundingLocked {
 +                                      return Some(msgs::ChannelReady {
                                                channel_id: self.channel_id,
                                                next_per_commitment_point,
                                                short_channel_id_alias: Some(self.outbound_scid_alias),
                                        });
                                }
                        } else {
 -                              self.monitor_pending_funding_locked = true;
 +                              self.monitor_pending_channel_ready = true;
                        }
                }
                None
        /// In the second, we simply return an Err indicating we need to be force-closed now.
        pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32,
                txdata: &TransactionData, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
 -      -> Result<(Option<msgs::FundingLocked>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
 +      -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
                if let Some(funding_txo) = self.get_funding_txo() {
                        for &(index_in_block, tx) in txdata.iter() {
                                // Check if the transaction is the expected funding transaction, and if it is,
                                                        }
                                                }
                                        }
 -                                      // If we allow 1-conf funding, we may need to check for funding_locked here and
 +                                      // If we allow 1-conf funding, we may need to check for channel_ready here and
                                        // send it immediately instead of waiting for a best_block_updated call (which
                                        // may have already happened for this block).
 -                                      if let Some(funding_locked) = self.check_get_funding_locked(height) {
 -                                              log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
 +                                      if let Some(channel_ready) = self.check_get_channel_ready(height) {
 +                                              log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
                                                let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger);
 -                                              return Ok((Some(funding_locked), announcement_sigs));
 +                                              return Ok((Some(channel_ready), announcement_sigs));
                                        }
                                }
                                for inp in tx.input.iter() {
        /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
        /// back.
        pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
 -      -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
 +      -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
                self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_pk)), logger)
        }
  
        fn do_best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_node_pk: Option<(BlockHash, PublicKey)>, logger: &L)
 -      -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
 +      -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
                let mut timed_out_htlcs = Vec::new();
                // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
                // forward an HTLC when our counterparty should almost certainly just fail it for expiring
  
                self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
  
 -              if let Some(funding_locked) = self.check_get_funding_locked(height) {
 +              if let Some(channel_ready) = self.check_get_channel_ready(height) {
                        let announcement_sigs = if let Some((genesis_block_hash, node_pk)) = genesis_node_pk {
                                self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger)
                        } else { None };
 -                      log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
 -                      return Ok((Some(funding_locked), timed_out_htlcs, announcement_sigs));
 +                      log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
 +                      return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                }
  
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
                if non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
 -                 (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32 {
 +                 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
                        let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
                        if self.funding_tx_confirmation_height == 0 {
 -                              // Note that check_get_funding_locked may reset funding_tx_confirmation_height to
 +                              // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
                                // zero if it has been reorged out, however in either case, our state flags
 -                              // indicate we've already sent a funding_locked
 +                              // indicate we've already sent a channel_ready
                                funding_tx_confirmations = 0;
                        }
  
 -                      // If we've sent funding_locked (or have both sent and received funding_locked), and
 +                      // If we've sent channel_ready (or have both sent and received channel_ready), and
                        // the funding transaction has become unconfirmed,
                        // close the channel and hope we can get the latest state on chain (because presumably
                        // the funding transaction is at least still in the mempool of most nodes).
                        log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id));
                        // If funding_tx_confirmed_in is unset, the channel must not be active
                        assert!(non_shutdown_state <= ChannelState::ChannelFunded as u32);
 -                      assert_eq!(non_shutdown_state & ChannelState::OurFundingLocked as u32, 0);
 +                      assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
                        return Err(ClosureReason::FundingTimedOut);
                }
  
  
        /// Indicates the funding transaction is no longer confirmed in the main chain. This may
        /// force-close the channel, but may also indicate a harmless reorganization of a block or two
 -      /// before the channel has reached funding_locked and we can just wait for more blocks.
 +      /// before the channel has reached channel_ready and we can just wait for more blocks.
        pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
                if self.funding_tx_confirmation_height != 0 {
                        // We handle the funding disconnection by calling best_block_updated with a height one
                        // time we saw and it will be ignored.
                        let best_time = self.update_time_counter;
                        match self.do_best_block_updated(reorg_height, best_time, None, logger) {
 -                              Ok((funding_locked, timed_out_htlcs, announcement_sigs)) => {
 -                                      assert!(funding_locked.is_none(), "We can't generate a funding with 0 confirmations?");
 +                              Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
 +                                      assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
                                        assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
                                        Ok(())
        }
  
        /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
 -      /// announceable and available for use (have exchanged FundingLocked messages in both
 +      /// announceable and available for use (have exchanged ChannelReady messages in both
        /// directions). Should be used for both broadcasted announcements and in response to an
        /// AnnouncementSignatures message from the remote peer.
        ///
@@@ -5871,7 -5851,7 +5877,7 @@@ impl<Signer: Sign> Writeable for Channe
                        RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
                }
  
 -              self.monitor_pending_funding_locked.write(writer)?;
 +              self.monitor_pending_channel_ready.write(writer)?;
                self.monitor_pending_revoke_and_ack.write(writer)?;
                self.monitor_pending_commitment_signed.write(writer)?;
  
@@@ -6132,7 -6112,7 +6138,7 @@@ impl<'a, Signer: Sign, K: Deref> Readab
                        _ => return Err(DecodeError::InvalidValue),
                };
  
 -              let monitor_pending_funding_locked = Readable::read(reader)?;
 +              let monitor_pending_channel_ready = Readable::read(reader)?;
                let monitor_pending_revoke_and_ack = Readable::read(reader)?;
                let monitor_pending_commitment_signed = Readable::read(reader)?;
  
  
                        resend_order,
  
 -                      monitor_pending_funding_locked,
 +                      monitor_pending_channel_ready,
                        monitor_pending_revoke_and_ack,
                        monitor_pending_commitment_signed,
                        monitor_pending_forwards,
@@@ -6433,7 -6413,7 +6439,7 @@@ mod tests 
        use ln::channelmanager::{HTLCSource, PaymentId};
        use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
        use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};
-       use ln::features::InitFeatures;
+       use ln::features::{InitFeatures, ChannelTypeFeatures};
        use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
        use ln::script::ShutdownScript;
        use ln::chan_utils;
                assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret).unwrap(),
                                SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
        }
+       #[test]
+       fn test_zero_conf_channel_type_support() {
+               let feeest = TestFeeEstimator{fee_est: 15000};
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+               let logger = test_utils::TestLogger::new();
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let config = UserConfig::default();
+               let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider,
+                       node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+               channel_type_features.set_zero_conf_required();
+               let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               open_channel_msg.channel_type = Some(channel_type_features);
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+               let res = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider,
+                       node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42);
+               assert!(res.is_ok());
+       }
  }
index 17a52e29e4a5eecb210fe9d3f10c2edc4306f233,9032b8f4b385e8f176a2d89dd308e33d66abece6..6eeed58b5fd195d7ffebaecebbed791c2de3ffdc
@@@ -169,7 -169,7 +169,7 @@@ enum OnionPayload 
        Invoice {
                /// This is only here for backwards-compatibility in serialization, in the future it can be
                /// removed, breaking clients running 0.0.106 and earlier.
 -              _legacy_hop_data: msgs::FinalOnionHopData,
 +              _legacy_hop_data: Option<msgs::FinalOnionHopData>,
        },
        /// Contains the payer-provided preimage.
        Spontaneous(PaymentPreimage),
@@@ -419,13 -419,11 +419,13 @@@ pub(super) struct ChannelHolder<Signer
        /// guarantees are made about the existence of a channel with the short id here, nor the short
        /// ids in the PendingHTLCInfo!
        pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
 -      /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user.
 +      /// Map from payment hash to the payment data and any HTLCs which are to us and can be
 +      /// failed/claimed by the user.
 +      ///
        /// Note that while this is held in the same mutex as the channels themselves, no consistency
        /// guarantees are made about the channels given here actually existing anymore by the time you
        /// go to read them!
 -      claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
 +      claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@@ -1074,18 -1072,18 +1074,18 @@@ pub struct ChannelDetails 
        pub force_close_spend_delay: Option<u16>,
        /// True if the channel was initiated (and thus funded) by us.
        pub is_outbound: bool,
 -      /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
 -      /// channel is not currently being shut down. `funding_locked` message exchange implies the
 +      /// True if the channel is confirmed, channel_ready messages have been exchanged, and the
 +      /// channel is not currently being shut down. `channel_ready` message exchange implies the
        /// required confirmation count has been reached (and we were connected to the peer at some
        /// point after the funding transaction received enough confirmations). The required
        /// confirmation count is provided in [`confirmations_required`].
        ///
        /// [`confirmations_required`]: ChannelDetails::confirmations_required
 -      pub is_funding_locked: bool,
 -      /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
 +      pub is_channel_ready: bool,
 +      /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
        /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
 -      /// This is a strict superset of `is_funding_locked`.
 +      /// This is a strict superset of `is_channel_ready`.
        pub is_usable: bool,
        /// True if this channel is (or will be) publicly-announced.
        pub is_public: bool,
@@@ -1314,7 -1312,7 +1314,7 @@@ macro_rules! remove_channel 
  }
  
  macro_rules! handle_monitor_err {
 -      ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_funding_locked: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
 +      ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
                        ChannelMonitorUpdateErr::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
 -                              $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_funding_locked, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
 +                              $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
                }
        };
 -      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_funding_locked: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
 -              let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_funding_locked, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
 +      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
 +              let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        };
 -      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_funding_locked: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
 -              handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_funding_locked, Vec::new(), Vec::new(), Vec::new())
 +      ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
 +              handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
@@@ -1403,13 -1401,13 +1403,13 @@@ macro_rules! maybe_break_monitor_err 
        }
  }
  
 -macro_rules! send_funding_locked {
 -      ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $funding_locked_msg: expr) => {
 -              $pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
 +macro_rules! send_channel_ready {
 +      ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
 +              $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
                        node_id: $channel.get_counterparty_node_id(),
 -                      msg: $funding_locked_msg,
 +                      msg: $channel_ready_msg,
                });
 -              // Note that we may send a funding locked multiple times for a channel if we reconnect, so
 +              // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
                // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
                let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id());
                assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(),
  macro_rules! handle_chan_restoration_locked {
        ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
         $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
 -       $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr, $announcement_sigs: expr) => { {
 +       $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
                let mut htlc_forwards = None;
  
                let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
                        }
  
                        if chanmon_update.is_some() {
 -                              // On reconnect, we, by definition, only resend a funding_locked if there have been
 +                              // On reconnect, we, by definition, only resend a channel_ready if there have been
                                // no commitment updates, so the only channel monitor update which could also be
 -                              // associated with a funding_locked would be the funding_created/funding_signed
 +                              // associated with a channel_ready would be the funding_created/funding_signed
                                // monitor update. That monitor update failing implies that we won't send
 -                              // funding_locked until it's been updated, so we can't have a funding_locked and a
 +                              // channel_ready until it's been updated, so we can't have a channel_ready and a
                                // monitor update here (so we don't bother to handle it correctly below).
 -                              assert!($funding_locked.is_none());
 -                              // A channel monitor update makes no sense without either a funding_locked or a
 -                              // commitment update to process after it. Since we can't have a funding_locked, we
 +                              assert!($channel_ready.is_none());
 +                              // A channel monitor update makes no sense without either a channel_ready or a
 +                              // commitment update to process after it. Since we can't have a channel_ready, we
                                // only bother to handle the monitor-update + commitment_update case below.
                                assert!($commitment_update.is_some());
                        }
  
 -                      if let Some(msg) = $funding_locked {
 -                              // Similar to the above, this implies that we're letting the funding_locked fly
 +                      if let Some(msg) = $channel_ready {
 +                              // Similar to the above, this implies that we're letting the channel_ready fly
                                // before it should be allowed to.
                                assert!(chanmon_update.is_none());
 -                              send_funding_locked!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
 +                              send_channel_ready!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
                        }
                        if let Some(msg) = $announcement_sigs {
                                $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@@ -1754,7 -1752,7 +1754,7 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                                        confirmations_required: channel.minimum_depth(),
                                        force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
 -                                      is_funding_locked: channel.is_usable(),
 +                                      is_channel_ready: channel.is_usable(),
                                        is_usable: channel.is_live(),
                                        is_public: channel.should_announce(),
                                        inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
                                });
                                announced_chans = true;
                        } else {
 -                              // If the channel is not public or has not yet reached funding_locked, check the
 +                              // If the channel is not public or has not yet reached channel_ready, check the
                                // next channel. If we don't yet have any public channels, we'll skip the broadcast
                                // below as peers may not accept it without channels on chain first.
                        }
                                                                        prev_funding_outpoint } => {
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
                                                                        PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
 -                                                                              let _legacy_hop_data = payment_data.clone();
 +                                                                              let _legacy_hop_data = Some(payment_data.clone());
                                                                                (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
                                                                        },
                                                                        PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
                                                                macro_rules! check_total_value {
                                                                        ($payment_data: expr, $payment_preimage: expr) => {{
                                                                                let mut payment_received_generated = false;
 -                                                                              let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
 -                                                                                      .or_insert(Vec::new());
 +                                                                              let purpose = || {
 +                                                                                      events::PaymentPurpose::InvoicePayment {
 +                                                                                              payment_preimage: $payment_preimage,
 +                                                                                              payment_secret: $payment_data.payment_secret,
 +                                                                                      }
 +                                                                              };
 +                                                                              let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
 +                                                                                      .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
                                                                                                payment_hash,
 -                                                                                              purpose: events::PaymentPurpose::InvoicePayment {
 -                                                                                                      payment_preimage: $payment_preimage,
 -                                                                                                      payment_secret: $payment_data.payment_secret,
 -                                                                                              },
 -                                                                                              amt: total_value,
 +                                                                                              purpose: purpose(),
 +                                                                                              amount_msat: total_value,
                                                                                        });
                                                                                        payment_received_generated = true;
                                                                                } else {
                                                                                        OnionPayload::Spontaneous(preimage) => {
                                                                                                match channel_state.claimable_htlcs.entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
 -                                                                                                              e.insert(vec![claimable_htlc]);
 +                                                                                                              let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
 +                                                                                                              e.insert((purpose.clone(), vec![claimable_htlc]));
                                                                                                                new_events.push(events::Event::PaymentReceived {
                                                                                                                        payment_hash,
 -                                                                                                                      amt: amt_to_forward,
 -                                                                                                                      purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
 +                                                                                                                      amount_msat: amt_to_forward,
 +                                                                                                                      purpose,
                                                                                                                });
                                                                                                        },
                                                                                                        hash_map::Entry::Occupied(_) => {
                                        true
                                });
  
 -                              channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
 +                              channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                        if htlcs.is_empty() {
                                                // This should be unreachable
                                                debug_assert!(false);
        /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
        /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
        /// along the path (including in our own channel on which we received it).
 -      /// Returns false if no payment was found to fail backwards, true if the process of failing the
 -      /// HTLC backwards has been started.
 -      pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
 +      ///
 +      /// Note that in some cases around unclean shutdown, it is possible the payment may have
 +      /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
 +      /// second copy of) the [`events::Event::PaymentReceived`] event. Alternatively, the payment
 +      /// may have already been failed automatically by LDK if it was nearing its expiration time.
 +      ///
 +      /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
 +      /// [`ChannelManager::claim_funds`]), you should still monitor for
 +      /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
 +      /// startup during which time claims that were in-progress at shutdown may be replayed.
 +      pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
                let mut channel_state = Some(self.channel_state.lock().unwrap());
                let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
 -              if let Some(mut sources) = removed_source {
 +              if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
                                if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                                HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
                                                HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
                        }
 -                      true
 -              } else { false }
 +              }
        }
  
        /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
        /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
        /// [`MessageSendEvent`]s needed to claim the payment.
        ///
 +      /// Note that calling this method does *not* guarantee that the payment has been claimed. You
 +      /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
 +      /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
 +      ///
        /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
        /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
        /// event matches your expectation. If you fail to do so and call this method, you may provide
        /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
        ///
 -      /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now
 -      /// pending for processing via [`get_and_clear_pending_msg_events`].
 -      ///
        /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
 +      /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
 +      /// [`process_pending_events`]: EventsProvider::process_pending_events
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
        /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
 -      pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
 +      pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
  
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
                let mut channel_state = Some(self.channel_state.lock().unwrap());
                let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
 -              if let Some(mut sources) = removed_source {
 +              if let Some((payment_purpose, mut sources)) = removed_source {
                        assert!(!sources.is_empty());
  
                        // If we are claiming an MPP payment, we have to take special care to ensure that each
                        // we got all the HTLCs and then a channel closed while we were waiting for the user to
                        // provide the preimage, so worrying too much about the optimal handling isn't worth
                        // it.
 +                      let mut claimable_amt_msat = 0;
 +                      let mut expected_amt_msat = None;
                        let mut valid_mpp = true;
                        for htlc in sources.iter() {
                                if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
                                        valid_mpp = false;
                                        break;
                                }
 +                              if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
 +                                      log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
 +                                      debug_assert!(false);
 +                                      valid_mpp = false;
 +                                      break;
 +                              }
 +                              expected_amt_msat = Some(htlc.total_msat);
 +                              if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
 +                                      // We don't currently support MPP for spontaneous payments, so just check
 +                                      // that there's one payment here and move on.
 +                                      if sources.len() != 1 {
 +                                              log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
 +                                              debug_assert!(false);
 +                                              valid_mpp = false;
 +                                              break;
 +                                      }
 +                              }
 +
 +                              claimable_amt_msat += htlc.value;
 +                      }
 +                      if sources.is_empty() || expected_amt_msat.is_none() {
 +                              log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
 +                              return;
 +                      }
 +                      if claimable_amt_msat != expected_amt_msat.unwrap() {
 +                              log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
 +                                      expected_amt_msat.unwrap(), claimable_amt_msat);
 +                              return;
                        }
  
                        let mut errs = Vec::new();
                                }
                        }
  
 +                      if claimed_any_htlcs {
 +                              self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
 +                                      payment_hash,
 +                                      purpose: payment_purpose,
 +                                      amount_msat: claimable_amt_msat,
 +                              });
 +                      }
 +
                        // Now that we've done the entire above loop in one lock, we can handle any errors
                        // which were generated.
                        channel_state.take();
                                let res: Result<(), _> = Err(err);
                                let _ = handle_error!(self, res, counterparty_node_id);
                        }
 -
 -                      claimed_any_htlcs
 -              } else { false }
 +              }
        }
  
        fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                        }
  
                        let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
 -                      let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() {
 +                      let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
                                // We only send a channel_update in the case where we are just now sending a
 -                              // funding_locked and the channel is in a usable state. We may re-send a
 +                              // channel_ready and the channel is in a usable state. We may re-send a
                                // channel_update later through the announcement_signatures process for public
                                // channels, but there's no reason not to just inform our counterparty of our fees
                                // now.
                                        })
                                } else { None }
                        } else { None };
 -                      chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked, updates.announcement_sigs);
 +                      chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
        /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
        /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
        ///
+       /// Note that this method will return an error and reject the channel, if it requires support
+       /// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer_0conf` must be
+       /// used to accept such channels.
+       ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
        pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
                                if *counterparty_node_id != channel.get().get_counterparty_node_id() {
                                        return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                }
-                               if accept_0conf { channel.get_mut().set_0conf(); }
+                               if accept_0conf {
+                                       channel.get_mut().set_0conf();
+                               } else if channel.get().get_channel_type().requires_zero_conf() {
+                                       let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                               node_id: channel.get().get_counterparty_node_id(),
+                                               action: msgs::ErrorAction::SendErrorMessage{
+                                                       msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+                                               }
+                                       };
+                                       channel_state.pending_msg_events.push(send_msg_err_event);
+                                       let _ = remove_channel!(self, channel_state, channel);
+                                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                               }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: channel.get().get_counterparty_node_id(),
                                        msg: channel.get_mut().accept_inbound_channel(user_channel_id),
                        },
                        hash_map::Entry::Vacant(entry) => {
                                if !self.default_configuration.manually_accept_inbound_channels {
+                                       if channel.get_channel_type().requires_zero_conf() {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+                                       }
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                                node_id: counterparty_node_id.clone(),
                                                msg: channel.accept_inbound_channel(0),
        }
  
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
 -              let ((funding_msg, monitor, mut funding_locked), mut chan) = {
 +              let ((funding_msg, monitor, mut channel_ready), mut chan) = {
                        let best_block = *self.best_block.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                                ChannelMonitorUpdateErr::TemporaryFailure => {
                                        // There's no problem signing a counterparty's funding transaction if our monitor
                                        // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 -                                      // accepted payment from yet. We do, however, need to wait to send our funding_locked
 +                                      // accepted payment from yet. We do, however, need to wait to send our channel_ready
                                        // until we have persisted our monitor.
 -                                      chan.monitor_update_failed(false, false, funding_locked.is_some(), Vec::new(), Vec::new(), Vec::new());
 -                                      funding_locked = None; // Don't send the funding_locked now
 +                                      chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
 +                                      channel_ready = None; // Don't send the channel_ready now
                                },
                        }
                }
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
 -                              if let Some(msg) = funding_locked {
 -                                      send_funding_locked!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg);
 +                              if let Some(msg) = channel_ready {
 +                                      send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg);
                                }
                                e.insert(chan);
                        }
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
 -                                      let (monitor, funding_tx, funding_locked) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
 +                                      let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
 -                                              let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, funding_locked.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
 +                                              let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
                                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                                        // We weren't able to watch the channel to begin with, so no updates should be made on
                                                        // it. Previously, full_stack_target found an (unreachable) panic when the
                                                }
                                                return res
                                        }
 -                                      if let Some(msg) = funding_locked {
 -                                              send_funding_locked!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg);
 +                                      if let Some(msg) = channel_ready {
 +                                              send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg);
                                        }
                                        funding_tx
                                },
                Ok(())
        }
  
 -      fn internal_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
 +      fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(msg.channel_id) {
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
 -                              let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().funding_locked(&msg, self.get_our_node_id(),
 +                              let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
                                        self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan);
                                if let Some(announcement_sigs) = announcement_sigs_opt {
                                        log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
                                        chan_restoration_res = handle_chan_restoration_locked!(
                                                self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
 -                                              responses.mon_update, Vec::new(), None, responses.funding_locked, responses.announcement_sigs);
 +                                              responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
                                        if let Some(upd) = channel_update {
                                                channel_state.pending_msg_events.push(upd);
                                        }
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
  
 -              if let Some(funding_locked_msg) = need_lnd_workaround {
 -                      self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
 +              if let Some(channel_ready_msg) = need_lnd_workaround {
 +                      self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
                Ok(())
        }
@@@ -5585,7 -5553,7 +5605,7 @@@ wher
        /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
        /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
        /// the function.
 -      fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
 +      fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
                        (&self, height_opt: Option<u32>, f: FN) {
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // during initialization prior to the chain_monitor being fully configured in some cases.
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                let res = f(channel);
 -                              if let Ok((funding_locked_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
 +                              if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
                                                        failure_code, data,
                                                }));
                                        }
 -                                      if let Some(funding_locked) = funding_locked_opt {
 -                                              send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked);
 +                                      if let Some(channel_ready) = channel_ready_opt {
 +                                              send_channel_ready!(short_to_id, pending_msg_events, channel, channel_ready);
                                                if channel.is_usable() {
 -                                                      log_trace!(self.logger, "Sending funding_locked with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
 +                                                      log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
                                                        if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                        node_id: channel.get_counterparty_node_id(),
                                                                });
                                                        }
                                                } else {
 -                                                      log_trace!(self.logger, "Sending funding_locked WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
 +                                                      log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
                                                }
                                        }
                                        if let Some(announcement_sigs) = announcement_sigs {
                                                        }
                                                }
                                        }
 -                                      if channel.is_our_funding_locked() {
 +                                      if channel.is_our_channel_ready() {
                                                if let Some(real_scid) = channel.get_short_channel_id() {
 -                                                      // If we sent a 0conf funding_locked, and now have an SCID, we add it
 +                                                      // If we sent a 0conf channel_ready, and now have an SCID, we add it
                                                        // to the short_to_id map here. Note that we check whether we can relay
                                                        // using the real SCID at relay-time (i.e. enforce option_scid_alias
                                                        // then), and if the funding tx is ever un-confirmed we force-close the
                        });
  
                        if let Some(height) = height_opt {
 -                              channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
 +                              channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                        htlcs.retain(|htlc| {
                                                // If height is approaching the number of blocks we think it takes us to get
                                                // our commitment transaction confirmed before the HTLC expires, plus the
@@@ -5765,9 -5733,9 +5785,9 @@@ impl<Signer: Sign, M: Deref , T: Deref 
                let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
        }
  
 -      fn handle_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) {
 +      fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 -              let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
 +              let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
        }
  
        fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
                                        &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
 -                                      &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != counterparty_node_id,
 +                                      &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
@@@ -6078,7 -6046,7 +6098,7 @@@ impl_writeable_tlv_based!(ChannelDetail
        (22, confirmations_required, option),
        (24, force_close_spend_delay, option),
        (26, is_outbound, required),
 -      (28, is_funding_locked, required),
 +      (28, is_channel_ready, required),
        (30, is_usable, required),
        (32, is_public, required),
        (33, inbound_htlc_minimum_msat, option),
@@@ -6198,9 -6166,13 +6218,9 @@@ impl_writeable_tlv_based!(HTLCPreviousH
  
  impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 -              let payment_data = match &self.onion_payload {
 -                      OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
 -                      _ => None,
 -              };
 -              let keysend_preimage = match self.onion_payload {
 -                      OnionPayload::Invoice { .. } => None,
 -                      OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
 +              let (payment_data, keysend_preimage) = match &self.onion_payload {
 +                      OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
 +                      OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
                };
                write_tlv_fields!(writer, {
                        (0, self.prev_hop, required),
@@@ -6241,13 -6213,13 +6261,13 @@@ impl Readable for ClaimableHTLC 
                                OnionPayload::Spontaneous(p)
                        },
                        None => {
 -                              if payment_data.is_none() {
 -                                      return Err(DecodeError::InvalidValue)
 -                              }
                                if total_msat.is_none() {
 +                                      if payment_data.is_none() {
 +                                              return Err(DecodeError::InvalidValue)
 +                                      }
                                        total_msat = Some(payment_data.as_ref().unwrap().total_msat);
                                }
 -                              OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
 +                              OnionPayload::Invoice { _legacy_hop_data: payment_data }
                        },
                };
                Ok(Self {
@@@ -6420,15 -6392,13 +6440,15 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                        }
                }
  
 +              let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
                (channel_state.claimable_htlcs.len() as u64).write(writer)?;
 -              for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
 +              for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
                        payment_hash.write(writer)?;
                        (previous_hops.len() as u64).write(writer)?;
                        for htlc in previous_hops.iter() {
                                htlc.write(writer)?;
                        }
 +                      htlc_purposes.push(purpose);
                }
  
                let per_peer_state = self.per_peer_state.write().unwrap();
                        (3, pending_outbound_payments, required),
                        (5, self.our_network_pubkey, required),
                        (7, self.fake_scid_rand_bytes, required),
 +                      (9, htlc_purposes, vec_type),
                });
  
                Ok(())
@@@ -6724,15 -6693,15 +6744,15 @@@ impl<'a, Signer: Sign, M: Deref, T: Der
                }
  
                let claimable_htlcs_count: u64 = Readable::read(reader)?;
 -              let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
 +              let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
                for _ in 0..claimable_htlcs_count {
                        let payment_hash = Readable::read(reader)?;
                        let previous_hops_len: u64 = Readable::read(reader)?;
                        let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
                        for _ in 0..previous_hops_len {
 -                              previous_hops.push(Readable::read(reader)?);
 +                              previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
                        }
 -                      claimable_htlcs.insert(payment_hash, previous_hops);
 +                      claimable_htlcs_list.push((payment_hash, previous_hops));
                }
  
                let peer_count: u64 = Readable::read(reader)?;
                let mut pending_outbound_payments = None;
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
 +              let mut claimable_htlc_purposes = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (3, pending_outbound_payments, option),
                        (5, received_network_pubkey, option),
                        (7, fake_scid_rand_bytes, option),
 +                      (9, claimable_htlc_purposes, vec_type),
                });
                if fake_scid_rand_bytes.is_none() {
                        fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
                        // payments which are still in-flight via their on-chain state.
                        // We only rebuild the pending payments map if we were most recently serialized by
                        // 0.0.102+
 -                      for (_, monitor) in args.channel_monitors {
 +                      for (_, monitor) in args.channel_monitors.iter() {
                                if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
                                        for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
                        }
                }
  
 +              let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
 +              let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
 +
 +              let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
 +              if let Some(mut purposes) = claimable_htlc_purposes {
 +                      if purposes.len() != claimable_htlcs_list.len() {
 +                              return Err(DecodeError::InvalidValue);
 +                      }
 +                      for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
 +                              claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
 +                      }
 +              } else {
 +                      // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do
 +                      // include a `_legacy_hop_data` in the `OnionPayload`.
 +                      for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
 +                              if previous_hops.is_empty() {
 +                                      return Err(DecodeError::InvalidValue);
 +                              }
 +                              let purpose = match &previous_hops[0].onion_payload {
 +                                      OnionPayload::Invoice { _legacy_hop_data } => {
 +                                              if let Some(hop_data) = _legacy_hop_data {
 +                                                      events::PaymentPurpose::InvoicePayment {
 +                                                              payment_preimage: match pending_inbound_payments.get(&payment_hash) {
 +                                                                      Some(inbound_payment) => inbound_payment.payment_preimage,
 +                                                                      None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
 +                                                                              Ok(payment_preimage) => payment_preimage,
 +                                                                              Err(()) => {
 +                                                                                      log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
 +                                                                                      return Err(DecodeError::InvalidValue);
 +                                                                              }
 +                                                                      }
 +                                                              },
 +                                                              payment_secret: hop_data.payment_secret,
 +                                                      }
 +                                              } else { return Err(DecodeError::InvalidValue); }
 +                                      },
 +                                      OnionPayload::Spontaneous(payment_preimage) =>
 +                                              events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
 +                              };
 +                              claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
 +                      }
 +              }
 +
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
  
                        }
                }
  
 -              let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
 -              let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
 +              for (_, monitor) in args.channel_monitors.iter() {
 +                      for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
 +                              if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
 +                                      log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
 +                                      let mut claimable_amt_msat = 0;
 +                                      for claimable_htlc in claimable_htlcs {
 +                                              claimable_amt_msat += claimable_htlc.value;
 +
 +                                              // Add a holding-cell claim of the payment to the Channel, which should be
 +                                              // applied ~immediately on peer reconnection. Because it won't generate a
 +                                              // new commitment transaction we can just provide the payment preimage to
 +                                              // the corresponding ChannelMonitor and nothing else.
 +                                              //
 +                                              // We do so directly instead of via the normal ChannelMonitor update
 +                                              // procedure as the ChainMonitor hasn't yet been initialized, implying
 +                                              // we're not allowed to call it directly yet. Further, we do the update
 +                                              // without incrementing the ChannelMonitor update ID as there isn't any
 +                                              // reason to.
 +                                              // If we were to generate a new ChannelMonitor update ID here and then
 +                                              // crash before the user finishes block connect we'd end up force-closing
 +                                              // this channel as well. On the flip side, there's no harm in restarting
 +                                              // without the new monitor persisted - we'll end up right back here on
 +                                              // restart.
 +                                              let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
 +                                              if let Some(channel) = by_id.get_mut(&previous_channel_id) {
 +                                                      channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
 +                                              }
 +                                              if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
 +                                                      previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger);
 +                                              }
 +                                      }
 +                                      pending_events_read.push(events::Event::PaymentClaimed {
 +                                              payment_hash,
 +                                              purpose: payment_purpose,
 +                                              amount_msat: claimable_amt_msat,
 +                                      });
 +                              }
 +                      }
 +              }
 +
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
@@@ -7250,10 -7136,8 +7270,10 @@@ mod tests 
                // claim_funds_along_route because the ordering of the messages causes the second half of the
                // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
                // lightning messages manually.
 -              assert!(nodes[1].node.claim_funds(payment_preimage));
 +              nodes[1].node.claim_funds(payment_preimage);
 +              expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
                check_added_monitors!(nodes[1], 2);
 +
                let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
@@@ -7652,12 -7536,12 +7672,12 @@@ pub mod bench 
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
  
 -              node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
 +              node_a.handle_channel_ready(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
                let msg_events = node_a.get_and_clear_pending_msg_events();
                assert_eq!(msg_events.len(), 2);
                match msg_events[0] {
 -                      MessageSendEvent::SendFundingLocked { ref msg, .. } => {
 -                              node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
 +                      MessageSendEvent::SendChannelReady { ref msg, .. } => {
 +                              node_b.handle_channel_ready(&node_a.get_our_node_id(), msg);
                                get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
                        },
                        _ => panic!(),
  
                                expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
                                expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
 -                              assert!($node_b.claim_funds(payment_preimage));
 +                              $node_b.claim_funds(payment_preimage);
 +                              expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
  
                                match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
                                        MessageSendEvent::UpdateHTLCs { node_id, updates } => {
index 754e20a4ff11ba0c68a891059a683705892849ff,85bbb6f02e706a5aba3b1dc604a87a83a33ee79c..bc430fe52837c8a2282e6ea2bf929b0b52568a17
@@@ -17,9 -17,9 +17,9 @@@ use chain::keysinterface::{Recipient, K
  use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA};
  use routing::network_graph::RoutingFees;
  use routing::router::{PaymentParameters, RouteHint, RouteHintHop};
- use ln::features::{InitFeatures, InvoiceFeatures};
+ use ln::features::{InitFeatures, InvoiceFeatures, ChannelTypeFeatures};
  use ln::msgs;
- use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate};
+ use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate, ErrorAction};
  use ln::wire::Encode;
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
@@@ -167,7 -167,7 +167,7 @@@ fn test_priv_forwarding_rejection() 
  }
  
  fn do_test_1_conf_open(connect_style: ConnectStyle) {
 -      // Previously, if the minium_depth config was set to 1, we'd never send a funding_locked. This
 +      // Previously, if the minimum_depth config was set to 1, we'd never send a channel_ready. This
        // tests that we properly send one in that case.
        let mut alice_config = UserConfig::default();
        alice_config.own_channel_config.minimum_depth = 1;
  
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
        mine_transaction(&nodes[1], &tx);
 -      nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
 +      nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
  
        mine_transaction(&nodes[0], &tx);
        let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(as_msg_events.len(), 2);
 -      let as_funding_locked = if let MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = as_msg_events[0] {
 +      let as_channel_ready = if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = as_msg_events[0] {
                assert_eq!(*node_id, nodes[1].node.get_our_node_id());
                msg.clone()
        } else { panic!("Unexpected event"); };
                assert_eq!(*node_id, nodes[1].node.get_our_node_id());
        } else { panic!("Unexpected event"); }
  
 -      nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked);
 +      nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
        let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(bs_msg_events.len(), 1);
        if let MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } = bs_msg_events[0] {
@@@ -259,7 -259,7 +259,7 @@@ fn test_routed_scid_alias() 
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
  
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).2;
 -      let mut as_funding_locked = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).0;
 +      let mut as_channel_ready = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).0;
  
        let last_hop = nodes[2].node.list_usable_channels();
        let hop_hints = vec![RouteHint(vec![RouteHintHop {
        pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
  
 -      // Now test that if a peer sends us a second funding_locked after the channel is operational we
 +      // Now test that if a peer sends us a second channel_ready after the channel is operational we
        // will use the new alias.
 -      as_funding_locked.short_channel_id_alias = Some(0xdeadbeef);
 -      nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
 -      // Note that we always respond to a funding_locked with a channel_update. Not a lot of reason
 +      as_channel_ready.short_channel_id_alias = Some(0xdeadbeef);
 +      nodes[2].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &as_channel_ready);
 +      // Note that we always respond to a channel_ready with a channel_update. Not a lot of reason
        // to bother updating that code, so just drop the message here.
        get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
        let updated_channel_info = nodes[2].node.list_usable_channels();
        assert_eq!(updated_channel_info.len(), 1);
        assert_eq!(updated_channel_info[0].inbound_scid_alias.unwrap(), 0xdeadbeef);
 -      // Note that because we never send a duplicate funding_locked we can't send a payment through
 +      // Note that because we never send a duplicate channel_ready we can't send a payment through
        // the 0xdeadbeef SCID alias.
  }
  
@@@ -403,10 -403,10 +403,10 @@@ fn test_inbound_scid_privacy() 
        connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
        confirm_transaction_at(&nodes[2], &tx, conf_height);
        connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
 -      let bs_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id());
 -      nodes[1].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
 +      let bs_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[2].node.get_our_node_id());
 +      nodes[1].node.handle_channel_ready(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
        let bs_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
 -      nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding_locked);
 +      nodes[2].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_channel_ready);
        let cs_update = get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
  
        nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &cs_update);
@@@ -592,7 -592,7 +592,7 @@@ fn open_zero_conf_channel<'a, 'b, 'c, '
        check_added_monitors!(receiver, 1);
        let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events();
        assert_eq!(bs_signed_locked.len(), 2);
 -      let as_funding_locked;
 +      let as_channel_ready;
        match &bs_signed_locked[0] {
                MessageSendEvent::SendFundingSigned { node_id, msg } => {
                        assert_eq!(*node_id, initiator.node.get_our_node_id());
                        assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
                        assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], tx);
  
 -                      as_funding_locked = get_event_msg!(initiator, MessageSendEvent::SendFundingLocked, receiver.node.get_our_node_id());
 +                      as_channel_ready = get_event_msg!(initiator, MessageSendEvent::SendChannelReady, receiver.node.get_our_node_id());
                }
                _ => panic!("Unexpected event"),
        }
        match &bs_signed_locked[1] {
 -              MessageSendEvent::SendFundingLocked { node_id, msg } => {
 +              MessageSendEvent::SendChannelReady { node_id, msg } => {
                        assert_eq!(*node_id, initiator.node.get_our_node_id());
 -                      initiator.node.handle_funding_locked(&receiver.node.get_our_node_id(), &msg);
 +                      initiator.node.handle_channel_ready(&receiver.node.get_our_node_id(), &msg);
                }
                _ => panic!("Unexpected event"),
        }
  
 -      receiver.node.handle_funding_locked(&initiator.node.get_our_node_id(), &as_funding_locked);
 +      receiver.node.handle_channel_ready(&initiator.node.get_our_node_id(), &as_channel_ready);
  
        let as_channel_update = get_event_msg!(initiator, MessageSendEvent::SendChannelUpdate, receiver.node.get_our_node_id());
        let bs_channel_update = get_event_msg!(receiver, MessageSendEvent::SendChannelUpdate, initiator.node.get_our_node_id());
@@@ -633,7 -633,7 +633,7 @@@ fn test_simple_0conf_channel() 
        // If our peer tells us they will accept our channel with 0 confs, and we funded the channel,
        // we should trust the funding won't be double-spent (assuming `trust_own_funding_0conf` is
        // set)!
 -      // Further, if we `accept_inbound_channel_from_trusted_peer_0conf`, funding locked messages
 +      // Further, if we `accept_inbound_channel_from_trusted_peer_0conf`, `channel_ready` messages
        // should fly immediately and the channel should be available for use as soon as they are
        // received.
  
  
  #[test]
  fn test_0conf_channel_with_async_monitor() {
 -      // Test that we properly send out funding_locked in (both inbound- and outbound-) zero-conf
 +      // Test that we properly send out channel_ready in (both inbound- and outbound-) zero-conf
        // channels if ChannelMonitor updates return a `TemporaryFailure` during the initial channel
        // negotiation.
  
                _ => panic!("Unexpected event"),
        }
        match &bs_signed_locked[1] {
 -              MessageSendEvent::SendFundingLocked { node_id, msg } => {
 +              MessageSendEvent::SendChannelReady { node_id, msg } => {
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 -                      nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &msg);
 +                      nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
                }
                _ => panic!("Unexpected event"),
        }
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], tx);
  
        match &as_locked_update[0] {
 -              MessageSendEvent::SendFundingLocked { node_id, msg } => {
 +              MessageSendEvent::SendChannelReady { node_id, msg } => {
                        assert_eq!(*node_id, nodes[1].node.get_our_node_id());
 -                      nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &msg);
 +                      nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &msg);
                }
                _ => panic!("Unexpected event"),
        }
@@@ -922,3 -922,102 +922,102 @@@ fn test_0conf_channel_reorg() 
        });
        check_closed_broadcast!(nodes[1], true);
  }
+ #[test]
+ fn test_zero_conf_accept_reject() {
+       let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+       channel_type_features.set_zero_conf_required();
+       // 1. Check we reject zero conf channels by default
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       open_channel_msg.channel_type = Some(channel_type_features.clone());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg);
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg, .. }, .. } => {
+                       assert_eq!(msg.data, "No zero confirmation channels accepted".to_owned());
+               },
+               _ => panic!(),
+       }
+       // 2. Check we can manually accept zero conf channels via the right method
+       let mut manually_accept_conf = UserConfig::default();
+       manually_accept_conf.manually_accept_inbound_channels = true;
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs,
+               &[None, Some(manually_accept_conf.clone())]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       // 2.1 First try the non-0conf method to manually accept
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42,
+               Some(manually_accept_conf)).unwrap();
+       let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel,
+               nodes[1].node.get_our_node_id());
+       open_channel_msg.channel_type = Some(channel_type_features.clone());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(),
+               &open_channel_msg);
+       // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`.
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       let events = nodes[1].node.get_and_clear_pending_events();
+
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       // Assert we fail to accept via the non-0conf method
+                       assert!(nodes[1].node.accept_inbound_channel(&temporary_channel_id,
+                               &nodes[0].node.get_our_node_id(), 0).is_err());
+               },
+               _ => panic!(),
+       }
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg, .. }, .. } => {
+                       assert_eq!(msg.data, "No zero confirmation channels accepted".to_owned());
+               },
+               _ => panic!(),
+       }
+       // 2.2 Try again with the 0conf method to manually accept
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42,
+               Some(manually_accept_conf)).unwrap();
+       let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel,
+               nodes[1].node.get_our_node_id());
+       open_channel_msg.channel_type = Some(channel_type_features);
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(),
+               &open_channel_msg);
+       let events = nodes[1].node.get_and_clear_pending_events();
+
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       // Assert we can accept via the 0conf method
+                       assert!(nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(
+                               &temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).is_ok());
+               },
+               _ => panic!(),
+       }
+       // Check we would send accept
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::SendAcceptChannel { .. } => {},
+               _ => panic!(),
+       }
+ }
index 0a886b93f6e01eb3dc70c967bc8d439ee3805b84,26eefc510fb8c9ac2a91e7811df4f87a5bd45a61..342dbb25545059660a10de797b952c229cf81d3e
@@@ -66,14 -66,6 +66,14 @@@ pub enum PaymentPurpose 
        SpontaneousPayment(PaymentPreimage),
  }
  
 +impl_writeable_tlv_based_enum!(PaymentPurpose,
 +      (0, InvoicePayment) => {
 +              (0, payment_preimage, option),
 +              (2, payment_secret, required),
 +      };
 +      (2, SpontaneousPayment)
 +);
 +
  #[derive(Clone, Debug, PartialEq)]
  /// The reason the channel was closed. See individual variants more details.
  pub enum ClosureReason {
@@@ -188,9 -180,8 +188,9 @@@ pub enum Event 
                /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
                user_channel_id: u64,
        },
 -      /// Indicates we've received money! Just gotta dig out that payment preimage and feed it to
 -      /// [`ChannelManager::claim_funds`] to get it....
 +      /// Indicates we've received (an offer of) money! Just gotta dig out that payment preimage and
 +      /// feed it to [`ChannelManager::claim_funds`] to get it....
 +      ///
        /// Note that if the preimage is not known, you should call
        /// [`ChannelManager::fail_htlc_backwards`] to free up resources for this HTLC and avoid
        /// network congestion.
                /// not stop you from registering duplicate payment hashes for inbound payments.
                payment_hash: PaymentHash,
                /// The value, in thousandths of a satoshi, that this payment is for.
 -              amt: u64,
 +              amount_msat: u64,
                /// Information for claiming this received payment, based on whether the purpose of the
                /// payment is to pay an invoice or to send a spontaneous payment.
                purpose: PaymentPurpose,
        },
 +      /// Indicates a payment has been claimed and we've received money!
 +      ///
 +      /// This most likely occurs when [`ChannelManager::claim_funds`] has been called in response
 +      /// to an [`Event::PaymentReceived`]. However, if we previously crashed during a
 +      /// [`ChannelManager::claim_funds`] call you may see this event without a corresponding
 +      /// [`Event::PaymentReceived`] event.
 +      ///
 +      /// # Note
 +      /// LDK will not stop an inbound payment from being paid multiple times, so multiple
 +      /// `PaymentReceived` events may be generated for the same payment. If you then call
 +      /// [`ChannelManager::claim_funds`] twice for the same [`Event::PaymentReceived`] you may get
 +      /// multiple `PaymentClaimed` events.
 +      ///
 +      /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
 +      PaymentClaimed {
 +              /// The payment hash of the claimed payment. Note that LDK will not stop you from
 +              /// registering duplicate payment hashes for inbound payments.
 +              payment_hash: PaymentHash,
 +              /// The value, in thousandths of a satoshi, that this payment is for.
 +              amount_msat: u64,
 +              /// The purpose of this claimed payment, i.e. whether the payment was for an invoice or a
 +              /// spontaneous payment.
 +              purpose: PaymentPurpose,
 +      },
        /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
        /// and we got back the payment preimage for it).
        ///
                /// transaction.
                claim_from_onchain_tx: bool,
        },
-       /// Used to indicate that a channel with the given `channel_id` is in the process of closure.
+       /// Used to indicate that a previously opened channel with the given `channel_id` is in the
+       /// process of closure.
        ChannelClosed  {
                /// The channel_id of the channel which has been closed. Note that on-chain transactions
                /// resolving the channel are likely still awaiting confirmation.
                /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to
                /// 0.0.106.
                ///
+               /// Furthermore, note that if [`ChannelTypeFeatures::supports_zero_conf`] returns true on this type,
+               /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to
+               /// 0.0.107. Channels setting this type also need to get manually accepted via
+               /// [`crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`],
+               /// or will be rejected otherwise.
+               ///
                /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
                channel_type: ChannelTypeFeatures,
        },
@@@ -512,7 -486,7 +519,7 @@@ impl Writeable for Event 
                                // We never write out FundingGenerationReady events as, upon disconnection, peers
                                // drop any channels which have not yet exchanged funding_signed.
                        },
 -                      &Event::PaymentReceived { ref payment_hash, ref amt, ref purpose } => {
 +                      &Event::PaymentReceived { ref payment_hash, ref amount_msat, ref purpose } => {
                                1u8.write(writer)?;
                                let mut payment_secret = None;
                                let payment_preimage;
                                write_tlv_fields!(writer, {
                                        (0, payment_hash, required),
                                        (2, payment_secret, option),
 -                                      (4, amt, required),
 +                                      (4, amount_msat, required),
                                        (6, 0u64, required), // user_payment_id required for compatibility with 0.0.103 and earlier
                                        (8, payment_preimage, option),
                                });
                                // We never write the OpenChannelRequest events as, upon disconnection, peers
                                // drop any channels which have not yet exchanged funding_signed.
                        },
 +                      &Event::PaymentClaimed { ref payment_hash, ref amount_msat, ref purpose } => {
 +                              19u8.write(writer)?;
 +                              write_tlv_fields!(writer, {
 +                                      (0, payment_hash, required),
 +                                      (2, purpose, required),
 +                                      (4, amount_msat, required),
 +                              });
 +                      },
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
@@@ -647,12 -613,12 +654,12 @@@ impl MaybeReadable for Event 
                                        let mut payment_hash = PaymentHash([0; 32]);
                                        let mut payment_preimage = None;
                                        let mut payment_secret = None;
 -                                      let mut amt = 0;
 +                                      let mut amount_msat = 0;
                                        let mut _user_payment_id = None::<u64>; // For compatibility with 0.0.103 and earlier
                                        read_tlv_fields!(reader, {
                                                (0, payment_hash, required),
                                                (2, payment_secret, option),
 -                                              (4, amt, required),
 +                                              (4, amount_msat, required),
                                                (6, _user_payment_id, option),
                                                (8, payment_preimage, option),
                                        });
                                        };
                                        Ok(Some(Event::PaymentReceived {
                                                payment_hash,
 -                                              amt,
 +                                              amount_msat,
                                                purpose,
                                        }))
                                };
                                // Value 17 is used for `Event::OpenChannelRequest`.
                                Ok(None)
                        },
 +                      19u8 => {
 +                              let f = || {
 +                                      let mut payment_hash = PaymentHash([0; 32]);
 +                                      let mut purpose = None;
 +                                      let mut amount_msat = 0;
 +                                      read_tlv_fields!(reader, {
 +                                              (0, payment_hash, required),
 +                                              (2, purpose, ignorable),
 +                                              (4, amount_msat, required),
 +                                      });
 +                                      if purpose.is_none() { return Ok(None); }
 +                                      Ok(Some(Event::PaymentClaimed {
 +                                              payment_hash,
 +                                              purpose: purpose.unwrap(),
 +                                              amount_msat,
 +                                      }))
 +                              };
 +                              f()
 +                      },
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
                        // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
                        // reads.
@@@ -902,12 -849,12 +909,12 @@@ pub enum MessageSendEvent 
                /// The message which should be sent.
                msg: msgs::FundingSigned,
        },
 -      /// Used to indicate that a funding_locked message should be sent to the peer with the given node_id.
 -      SendFundingLocked {
 +      /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id.
 +      SendChannelReady {
                /// The node_id of the node which should receive these message(s)
                node_id: PublicKey,
 -              /// The funding_locked message which should be sent.
 -              msg: msgs::FundingLocked,
 +              /// The channel_ready message which should be sent.
 +              msg: msgs::ChannelReady,
        },
        /// Used to indicate that an announcement_signatures message should be sent to the peer with the given node_id.
        SendAnnouncementSignatures {