Merge pull request #2681 from tnull/2023-10-bump-msrv-to-1.63.0
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 11 Dec 2023 18:31:30 +0000 (18:31 +0000)
committer GitHub <noreply@github.com>
Mon, 11 Dec 2023 18:31:30 +0000 (18:31 +0000)
Bump MSRV to rustc 1.63.0
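
Several of the smaller hunks below read as routine toolchain fallout rather than behavioural change: trait objects gain an explicit `dyn` (`Arc<dyn Logger>`, `None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>`), the `#[deny(const_err)]` attributes are dropped (that lint has since been removed from rustc), and a test closure gains a `let _ = &node_ref;` capture hint. The MSRV declaration itself — presumably a `rust-version` bump in the Cargo manifests — is not part of this excerpt. A minimal sketch of the explicit-`dyn` spelling, using a stand-in `Logger` trait rather than LDK's:

```rust
// Illustrative only: the explicit-`dyn` form this diff moves to for trait
// objects. `Logger`/`PrintLogger` are stand-ins, not LDK types.
use std::sync::Arc;

trait Logger {
    fn log(&self, msg: &str);
}

struct PrintLogger;

impl Logger for PrintLogger {
    fn log(&self, msg: &str) {
        println!("{msg}");
    }
}

fn main() {
    // Older spelling: `let logger: Arc<Logger> = ...` (bare trait object).
    // Newer toolchains lint against the bare form, hence the explicit `dyn`:
    let logger: Arc<dyn Logger> = Arc::new(PrintLogger);
    logger.log("trait objects spelled with an explicit `dyn`");
}
```

This is the same change applied to `Arc<dyn Logger>` and `&&dyn NodeSigner` further down in the channel.rs test code.
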

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/payment_tests.rs
lightning/src/util/test_utils.rs

index 582f67878e60d8e59d4a36b567e12a8fcded0798,ffde167ca0c4e0371071a165f41e8311de633cee..8cff537bbfc1417c5b0fb64aa097c3cf179aeaf1
@@@ -793,6 -793,7 +793,6 @@@ pub(super) struct MonitorRestoreUpdate
  pub(super) struct SignerResumeUpdates {
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub funding_signed: Option<msgs::FundingSigned>,
 -      pub funding_created: Option<msgs::FundingCreated>,
        pub channel_ready: Option<msgs::ChannelReady>,
  }
  
@@@ -2406,6 -2407,38 +2406,6 @@@ impl<SP: Deref> ChannelContext<SP> wher
                }
        }
  
 -      /// Only allowed after [`Self::channel_transaction_parameters`] is set.
 -      fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
 -              let counterparty_keys = self.build_remote_transaction_keys();
 -              let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
 -              let signature = match &self.holder_signer {
 -                      // TODO (taproot|arik): move match into calling method for Taproot
 -                      ChannelSignerType::Ecdsa(ecdsa) => {
 -                              ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
 -                                      .map(|(sig, _)| sig).ok()?
 -                      },
 -                      // TODO (taproot|arik)
 -                      #[cfg(taproot)]
 -                      _ => todo!()
 -              };
 -
 -              if self.signer_pending_funding {
 -                      log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
 -                      self.signer_pending_funding = false;
 -              }
 -
 -              Some(msgs::FundingCreated {
 -                      temporary_channel_id: self.temporary_channel_id.unwrap(),
 -                      funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
 -                      funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
 -                      signature,
 -                      #[cfg(taproot)]
 -                      partial_signature_with_nonce: None,
 -                      #[cfg(taproot)]
 -                      next_local_nonce: None,
 -              })
 -      }
 -
        /// Only allowed after [`Self::channel_transaction_parameters`] is set.
        fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
                let counterparty_keys = self.build_remote_transaction_keys();
@@@ -2909,6 -2942,99 +2909,6 @@@ impl<SP: Deref> Channel<SP> wher
        }
  
        // Message handlers:
 -
 -      /// Handles a funding_signed message from the remote end.
 -      /// If this call is successful, broadcast the funding transaction (and not before!)
 -      pub fn funding_signed<L: Deref>(
 -              &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
 -      ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
 -      where
 -              L::Target: Logger
 -      {
 -              if !self.context.is_outbound() {
 -                      return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
 -              }
 -              if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
 -                      return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
 -              }
 -              if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
 -                              self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
 -                              self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
 -                      panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
 -              }
 -
 -              let funding_script = self.context.get_funding_redeemscript();
 -
 -              let counterparty_keys = self.context.build_remote_transaction_keys();
 -              let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
 -              let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
 -              let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
 -
 -              log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
 -                      &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 -
 -              let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
 -              let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
 -              {
 -                      let trusted_tx = initial_commitment_tx.trust();
 -                      let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
 -                      let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
 -                      // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
 -                      if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
 -                              return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
 -                      }
 -              }
 -
 -              let holder_commitment_tx = HolderCommitmentTransaction::new(
 -                      initial_commitment_tx,
 -                      msg.signature,
 -                      Vec::new(),
 -                      &self.context.get_holder_pubkeys().funding_pubkey,
 -                      self.context.counterparty_funding_pubkey()
 -              );
 -
 -              self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
 -                      .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
 -
 -
 -              let funding_redeemscript = self.context.get_funding_redeemscript();
 -              let funding_txo = self.context.get_funding_txo().unwrap();
 -              let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
 -              let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
 -              let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
 -              let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
 -              monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
 -              let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
 -                                                        shutdown_script, self.context.get_holder_selected_contest_delay(),
 -                                                        &self.context.destination_script, (funding_txo, funding_txo_script),
 -                                                        &self.context.channel_transaction_parameters,
 -                                                        funding_redeemscript.clone(), self.context.channel_value_satoshis,
 -                                                        obscure_factor,
 -                                                        holder_commitment_tx, best_block, self.context.counterparty_node_id);
 -              channel_monitor.provide_initial_counterparty_commitment_tx(
 -                      counterparty_initial_bitcoin_tx.txid, Vec::new(),
 -                      self.context.cur_counterparty_commitment_transaction_number,
 -                      self.context.counterparty_cur_commitment_point.unwrap(),
 -                      counterparty_initial_commitment_tx.feerate_per_kw(),
 -                      counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
 -                      counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 -
 -              assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update!
 -              if self.context.is_batch_funding() {
 -                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
 -              } else {
 -                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 -              }
 -              self.context.cur_holder_commitment_transaction_number -= 1;
 -              self.context.cur_counterparty_commitment_transaction_number -= 1;
 -
 -              log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 -
 -              let need_channel_ready = self.check_get_channel_ready(0).is_some();
 -              self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 -              Ok(channel_monitor)
 -      }
 -
        /// Updates the state of the channel to indicate that all channels in the batch have received
        /// funding_signed and persisted their monitors.
        /// The funding transaction is consequently allowed to be broadcast, and the channel can be
                let channel_ready = if funding_signed.is_some() {
                        self.check_get_channel_ready(0)
                } else { None };
 -              let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
 -                      self.context.get_funding_created_msg(logger)
 -              } else { None };
  
 -              log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
 +              log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
                        if commitment_update.is_some() { "a" } else { "no" },
                        if funding_signed.is_some() { "a" } else { "no" },
 -                      if funding_created.is_some() { "a" } else { "no" },
                        if channel_ready.is_some() { "a" } else { "no" });
  
                SignerResumeUpdates {
                        commitment_update,
                        funding_signed,
 -                      funding_created,
                        channel_ready,
                }
        }
                        // larger. If we don't know that time has moved forward, we can just set it to the last
                        // time we saw and it will be ignored.
                        let best_time = self.context.update_time_counter;
-                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
+                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
                                Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
                                        assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
  
        /// Begins the shutdown process, getting a message for the remote peer and returning all
        /// holding cell HTLCs for payment failure.
 -      ///
 -      /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
 -      /// [`ChannelMonitorUpdate`] will be returned).
        pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
                target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
 -      -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
 +      -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        {
                for htlc in self.context.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
                        return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
                }
  
 -              // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
 -              // script is set, we just force-close and call it a day.
 -              let mut chan_closed = false;
 -              if self.context.channel_state.is_pre_funded_state() {
 -                      chan_closed = true;
 -              }
 -
                let update_shutdown_script = match self.context.shutdown_scriptpubkey {
                        Some(_) => false,
 -                      None if !chan_closed => {
 +                      None => {
                                // use override shutdown script if provided
                                let shutdown_scriptpubkey = match override_shutdown_script {
                                        Some(script) => script,
                                self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
                                true
                        },
 -                      None => false,
                };
  
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
 -              let shutdown_result = if self.context.channel_state.is_pre_funded_state() {
 -                      let shutdown_result = ShutdownResult {
 -                              monitor_update: None,
 -                              dropped_outbound_htlcs: Vec::new(),
 -                              unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
 -                              channel_id: self.context.channel_id,
 -                              counterparty_node_id: self.context.counterparty_node_id,
 -                      };
 -                      self.context.channel_state = ChannelState::ShutdownComplete;
 -                      Some(shutdown_result)
 -              } else {
 -                      self.context.channel_state.set_local_shutdown_sent();
 -                      None
 -              };
 +              self.context.channel_state.set_local_shutdown_sent();
                self.context.update_time_counter += 1;
  
                let monitor_update = if update_shutdown_script {
                debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
                        "we can't both complete shutdown and return a monitor update");
  
 -              Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
 +              Ok((shutdown, monitor_update, dropped_outbound_htlcs))
        }
  
        pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
@@@ -6283,38 -6438,6 +6283,38 @@@ impl<SP: Deref> OutboundV1Channel<SP> w
                })
        }
  
 +      /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
 +      fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
 +              let counterparty_keys = self.context.build_remote_transaction_keys();
 +              let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
 +              let signature = match &self.context.holder_signer {
 +                      // TODO (taproot|arik): move match into calling method for Taproot
 +                      ChannelSignerType::Ecdsa(ecdsa) => {
 +                              ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
 +                                      .map(|(sig, _)| sig).ok()?
 +                      },
 +                      // TODO (taproot|arik)
 +                      #[cfg(taproot)]
 +                      _ => todo!()
 +              };
 +
 +              if self.context.signer_pending_funding {
 +                      log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
 +                      self.context.signer_pending_funding = false;
 +              }
 +
 +              Some(msgs::FundingCreated {
 +                      temporary_channel_id: self.context.temporary_channel_id.unwrap(),
 +                      funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
 +                      funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
 +                      signature,
 +                      #[cfg(taproot)]
 +                      partial_signature_with_nonce: None,
 +                      #[cfg(taproot)]
 +                      next_local_nonce: None,
 +              })
 +      }
 +
        /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
        /// a funding_created message for the remote peer.
        /// Panics if called at some time other than immediately after initial handshake, if called twice,
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
 -      pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
 -      -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
 +      pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
 +      -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
                }
                self.context.funding_transaction = Some(funding_transaction);
                self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
  
 -              let funding_created = self.context.get_funding_created_msg(logger);
 +              let funding_created = self.get_funding_created_msg(logger);
                if funding_created.is_none() {
                        if !self.context.signer_pending_funding {
                                log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
                        }
                }
  
 -              let channel = Channel {
 -                      context: self.context,
 -              };
 -
 -              Ok((channel, funding_created))
 +              Ok(funding_created)
        }
  
        fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
  
                Ok(())
        }
 +
 +      /// Handles a funding_signed message from the remote end.
 +      /// If this call is successful, broadcast the funding transaction (and not before!)
 +      pub fn funding_signed<L: Deref>(
 +              mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
 +      ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
 +      where
 +              L::Target: Logger
 +      {
 +              if !self.context.is_outbound() {
 +                      return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
 +              }
 +              if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
 +                      return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
 +              }
 +              if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
 +                              self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
 +                              self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
 +                      panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
 +              }
 +
 +              let funding_script = self.context.get_funding_redeemscript();
 +
 +              let counterparty_keys = self.context.build_remote_transaction_keys();
 +              let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
 +              let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
 +              let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
 +
 +              log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
 +                      &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 +
 +              let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
 +              let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
 +              {
 +                      let trusted_tx = initial_commitment_tx.trust();
 +                      let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
 +                      let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
 +                      // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
 +                      if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
 +                              return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
 +                      }
 +              }
 +
 +              let holder_commitment_tx = HolderCommitmentTransaction::new(
 +                      initial_commitment_tx,
 +                      msg.signature,
 +                      Vec::new(),
 +                      &self.context.get_holder_pubkeys().funding_pubkey,
 +                      self.context.counterparty_funding_pubkey()
 +              );
 +
 +              let validated =
 +                      self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
 +              if validated.is_err() {
 +                      return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
 +              }
 +
 +              let funding_redeemscript = self.context.get_funding_redeemscript();
 +              let funding_txo = self.context.get_funding_txo().unwrap();
 +              let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
 +              let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
 +              let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
 +              let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
 +              monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
 +              let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
 +                                                        shutdown_script, self.context.get_holder_selected_contest_delay(),
 +                                                        &self.context.destination_script, (funding_txo, funding_txo_script),
 +                                                        &self.context.channel_transaction_parameters,
 +                                                        funding_redeemscript.clone(), self.context.channel_value_satoshis,
 +                                                        obscure_factor,
 +                                                        holder_commitment_tx, best_block, self.context.counterparty_node_id);
 +              channel_monitor.provide_initial_counterparty_commitment_tx(
 +                      counterparty_initial_bitcoin_tx.txid, Vec::new(),
 +                      self.context.cur_counterparty_commitment_transaction_number,
 +                      self.context.counterparty_cur_commitment_point.unwrap(),
 +                      counterparty_initial_commitment_tx.feerate_per_kw(),
 +                      counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
 +                      counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 +
 +              assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail updates!
 +              if self.context.is_batch_funding() {
 +                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
 +              } else {
 +                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 +              }
 +              self.context.cur_holder_commitment_transaction_number -= 1;
 +              self.context.cur_counterparty_commitment_transaction_number -= 1;
 +
 +              log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 +
 +              let mut channel = Channel { context: self.context };
 +
 +              let need_channel_ready = channel.check_get_channel_ready(0).is_some();
 +              channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 +              Ok((channel, channel_monitor))
 +      }
 +
 +      /// Indicates that the signer may have some signatures for us, so we should retry if we're
 +      /// blocked.
 +      #[allow(unused)]
 +      pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
 +              if self.context.signer_pending_funding && self.context.is_outbound() {
 +                      log_trace!(logger, "Signer unblocked a funding_created");
 +                      self.get_funding_created_msg(logger)
 +              } else { None }
 +      }
  }
  
  /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
@@@ -8343,12 -8364,11 +8343,12 @@@ mod tests 
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 -              let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
 +              let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
  
                // Node B --> Node A: funding signed
 -              let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 +              let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
 +              let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
  
                // Put some inbound and outbound HTLCs in A's channel.
                let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 -              let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
 +              let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
  
                // Node B --> Node A: funding signed
 -              let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 +              let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
 +              let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
  
                // Now disconnect the two nodes and check that the commitment point in
                // Node B's channel_reestablish message is sane.
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 -              let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
 +              let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
  
                // Node B --> Node A: funding signed
 -              let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
 +              let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
 +              let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
  
                // Make sure that receiving a channel update will update the Channel as expected.
                let update = ChannelUpdate {
                assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
        }
  
-       #[cfg(feature = "_test_vectors")]
+       #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
        #[test]
        fn outbound_commitment_test() {
                use bitcoin::sighash;
  
                // Test vectors from BOLT 3 Appendices C and F (anchors):
                let feeest = TestFeeEstimator{fee_est: 15000};
-               let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+               let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
                let secp_ctx = Secp256k1::new();
  
                let mut signer = InMemorySigner::new(
                                },
                        ]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 -              let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
 -                      tx.clone(),
 -                      funding_outpoint,
 -                      true,
 -                      &&logger,
 +              let funding_created_msg = node_a_chan.get_funding_created(
 +                      tx.clone(), funding_outpoint, true, &&logger,
                ).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
                        &funding_created_msg.unwrap(),
  
                // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
                // broadcasting the funding transaction until the batch is ready.
 -              let _ = node_a_chan.funding_signed(
 -                      &funding_signed_msg.unwrap(),
 -                      best_block,
 -                      &&keys_provider,
 -                      &&logger,
 -              ).unwrap();
 +              let res = node_a_chan.funding_signed(
 +                      &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
 +              );
 +              let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
                let node_a_updates = node_a_chan.monitor_updating_restored(
                        &&logger,
                        &&keys_provider,
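
The channel.rs hunks above reshape the funding handshake around the unfunded channel type: `get_funding_created` now takes `&mut self` and returns only the optional `funding_created` message (wrapped in a `Result`) instead of also producing a `Channel`, `funding_signed` consumes the `OutboundV1Channel` and yields the funded `Channel` plus its `ChannelMonitor` on success (or hands the unfunded channel back alongside the error), and a `signer_maybe_unblocked` helper on the unfunded type allows a pending `funding_created` to be retried. A minimal sketch of that consuming state transition, with stand-in types and simplified signatures rather than LDK's real ones:

```rust
// Stand-in types for illustration; not LDK's real structs or signatures.
struct OutboundV1Channel;
struct Channel;
struct ChannelMonitor;
struct ChannelError(String);

impl OutboundV1Channel {
    // Borrows mutably: may hand back the message now, or None if the signer
    // is still pending, leaving `self` in place so the caller can retry.
    fn get_funding_created(&mut self) -> Option<String> {
        Some("funding_created".to_owned())
    }

    // Consumes the unfunded channel. Success yields the funded `Channel` and
    // its monitor; failure returns the channel so the caller can restore or
    // clean up its map entry.
    fn funding_signed(self) -> Result<(Channel, ChannelMonitor), (OutboundV1Channel, ChannelError)> {
        Ok((Channel, ChannelMonitor))
    }
}

fn main() {
    let mut chan = OutboundV1Channel;
    let _funding_created = chan.get_funding_created();
    match chan.funding_signed() {
        Ok((_funded, _monitor)) => println!("promoted to a funded channel"),
        Err((_still_unfunded, ChannelError(e))) => println!("funding_signed failed: {e}"),
    }
}
```

Making `funding_signed` consume the pre-funded type is what lets the channelmanager.rs changes below replace the map entry with a `ChannelPhase::Funded` value rather than mutating in place.
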
index 7c6ee1e429c1d1a325759d22c0a73207fa50d730,9a1a9df2e162ac4928853f00390ba174b18e8428..be220f824304f4d40bf62c4bcfd1a04ec6dca080
@@@ -1521,13 -1521,11 +1521,11 @@@ pub const MIN_FINAL_CLTV_EXPIRY_DELTA: 
  // then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
  // failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
  // LATENCY_GRACE_PERIOD_BLOCKS.
- #[deny(const_err)]
  #[allow(dead_code)]
  const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
  
  // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
  // ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
- #[deny(const_err)]
  #[allow(dead_code)]
  const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
  
@@@ -2714,10 -2712,9 +2712,10 @@@ wher
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
 -              let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
 -              let shutdown_result;
 -              loop {
 +              let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
 +              let mut shutdown_result = None;
 +
 +              {
                        let per_peer_state = self.per_peer_state.read().unwrap();
  
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let their_features = &peer_state.latest_features;
 -                                              let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
 +                                              let (shutdown_msg, mut monitor_update_opt, htlcs) =
                                                        chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
                                                failed_htlcs = htlcs;
 -                                              shutdown_result = local_shutdown_result;
 -                                              debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
  
                                                // We can send the `shutdown` message before updating the `ChannelMonitor`
                                                // here as we don't need the monitor update to complete until we send a
                                                if let Some(monitor_update) = monitor_update_opt.take() {
                                                        handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
 -                                                      break;
                                                }
 -
 -                                              if chan.is_shutdown() {
 -                                                      if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
 -                                                              if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                                                              msg: channel_update
 -                                                                      });
 -                                                              }
 -                                                              self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 -                                                      }
 -                                              }
 -                                              break;
 +                                      } else {
 +                                              self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
 +                                              let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
 +                                              shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
 -                                      // If we reach this point, it means that the channel_id either refers to an unfunded channel or
 -                                      // it does not exist for this peer. Either way, we can attempt to force-close it.
 -                                      //
 -                                      // An appropriate error will be returned for non-existence of the channel if that's the case.
 -                                      mem::drop(peer_state_lock);
 -                                      mem::drop(per_peer_state);
 -                                      return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
 +                                      return Err(APIError::ChannelUnavailable {
 +                                              err: format!(
 +                                                      "Channel with id {} not found for the passed counterparty node_id {}",
 +                                                      channel_id, counterparty_node_id,
 +                                              )
 +                                      });
                                },
                        }
                }
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
 -                      Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
 +                      Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
  
                                let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
                                        } else { unreachable!(); });
                                match funding_res {
 -                                      Ok((chan, funding_msg)) => (chan, funding_msg),
 +                                      Ok(funding_msg) => (chan, funding_msg),
                                        Err((chan, err)) => {
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
                                if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
                                        panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
 -                              e.insert(ChannelPhase::Funded(chan));
 +                              e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
                }
                Ok(())
                                                // We got a temporary failure updating monitor, but will claim the
                                                // HTLC when the monitor updating is restored (or on chain).
                                                let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
 -                                              log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
 +                                              log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                        } else { errs.push((pk, err)); }
                                }
                        }
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
 -                      hash_map::Entry::Occupied(mut chan_phase_entry) => {
 -                              match chan_phase_entry.get_mut() {
 -                                      ChannelPhase::Funded(ref mut chan) => {
 -                                              let logger = WithChannelContext::from(&self.logger, &chan.context);
 -                                              let monitor = try_chan_phase_entry!(self,
 -                                                      chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger), chan_phase_entry);
 -                                              if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
 -                                                      handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 -                                                      Ok(())
 -                                              } else {
 -                                                      try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
 +                      hash_map::Entry::Occupied(chan_phase_entry) => {
 +                              if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
 +                                      let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
 +                                      let logger = WithContext::from(
 +                                              &self.logger,
 +                                              Some(chan.context.get_counterparty_node_id()),
 +                                              Some(chan.context.channel_id())
 +                                      );
 +                                      let res =
 +                                              chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
 +                                      match res {
 +                                              Ok((chan, monitor)) => {
 +                                                      if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
 +                                                              // We really should be able to insert here without doing a second
 +                                                              // lookup, but sadly rust stdlib doesn't currently allow keeping
 +                                                              // the original Entry around with the value removed.
 +                                                              let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
 +                                                              if let ChannelPhase::Funded(ref mut chan) = &mut chan {
 +                                                                      handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 +                                                              } else { unreachable!(); }
 +                                                              Ok(())
 +                                                      } else {
 +                                                              let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
 +                                                              return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
 +                                                      }
 +                                              },
 +                                              Err((chan, e)) => {
 +                                                      debug_assert!(matches!(e, ChannelError::Close(_)),
 +                                                              "We don't have a channel anymore, so the error better have expected close");
 +                                                      // We've already removed this outbound channel from the map in
 +                                                      // `PeerState` above so at this point we just need to clean up any
 +                                                      // lingering entries concerning this channel as it is safe to do so.
 +                                                      return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
                                                }
 -                                      },
 -                                      _ => {
 -                                              return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
 -                                      },
 +                                      }
 +                              } else {
 +                                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
                                }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
  
                let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| {
                        let node_id = phase.context().get_counterparty_node_id();
 -                      if let ChannelPhase::Funded(chan) = phase {
 -                              let msgs = chan.signer_maybe_unblocked(&self.logger);
 -                              if let Some(updates) = msgs.commitment_update {
 -                                      pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 -                                              node_id,
 -                                              updates,
 -                                      });
 -                              }
 -                              if let Some(msg) = msgs.funding_signed {
 -                                      pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 -                                              node_id,
 -                                              msg,
 -                                      });
 -                              }
 -                              if let Some(msg) = msgs.funding_created {
 -                                      pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
 -                                              node_id,
 -                                              msg,
 -                                      });
 +                      match phase {
 +                              ChannelPhase::Funded(chan) => {
 +                                      let msgs = chan.signer_maybe_unblocked(&self.logger);
 +                                      if let Some(updates) = msgs.commitment_update {
 +                                              pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                                      node_id,
 +                                                      updates,
 +                                              });
 +                                      }
 +                                      if let Some(msg) = msgs.funding_signed {
 +                                              pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 +                                                      node_id,
 +                                                      msg,
 +                                              });
 +                                      }
 +                                      if let Some(msg) = msgs.channel_ready {
 +                                              send_channel_ready!(self, pending_msg_events, chan, msg);
 +                                      }
                                }
 -                              if let Some(msg) = msgs.channel_ready {
 -                                      send_channel_ready!(self, pending_msg_events, chan, msg);
 +                              ChannelPhase::UnfundedOutboundV1(chan) => {
 +                                      if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
 +                                              pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
 +                                                      node_id,
 +                                                      msg,
 +                                              });
 +                                      }
                                }
 +                              ChannelPhase::UnfundedInboundV1(_) => {},
                        }
                };
  
index 01f59528989a7cc1f14c40a0e78cfc29bddb091d,b3805a760da7b87cb082459f0dec394a31fae193..adc611e7e190833fc8a8f87346f2ba2009bab730
@@@ -19,9 -19,8 +19,9 @@@ use crate::events::{ClosureReason, Even
  use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, commit_tx_fee_msat, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI};
  use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
  use crate::ln::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures};
 -use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
 +use crate::ln::{msgs, ChannelId, PaymentHash, PaymentSecret, PaymentPreimage};
  use crate::ln::msgs::ChannelMessageHandler;
 +use crate::ln::onion_utils;
  use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
  use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
  use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
@@@ -32,14 -31,10 +32,14 @@@ use crate::util::errors::APIError
  use crate::util::ser::Writeable;
  use crate::util::string::UntrustedString;
  
 +use bitcoin::hashes::Hash;
 +use bitcoin::hashes::sha256::Hash as Sha256;
  use bitcoin::network::constants::Network;
 +use bitcoin::secp256k1::{Secp256k1, SecretKey};
  
  use crate::prelude::*;
  
 +use crate::ln::functional_test_utils;
  use crate::ln::functional_test_utils::*;
  use crate::routing::gossip::NodeId;
  #[cfg(feature = "std")]
@@@ -3339,6 -3334,7 +3339,7 @@@ fn test_threaded_payment_retries() 
                // We really want std::thread::scope, but its not stable until 1.63. Until then, we get unsafe.
                let node_ref = NodePtr::from_node(&nodes[0]);
                move || {
+                       let _ = &node_ref;
                        let node_a = unsafe { &*node_ref.0 };
                        while Instant::now() < end_time {
                                node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
@@@ -4199,59 -4195,3 +4200,59 @@@ fn  test_htlc_forward_considers_anchor_
        check_closed_broadcast(&nodes[2], 1, true);
        check_added_monitors(&nodes[2], 1);
  }
 +
 +#[test]
 +fn peel_payment_onion_custom_tlvs() {
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 +      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +      create_announced_chan_between_nodes(&nodes, 0, 1);
 +      let secp_ctx = Secp256k1::new();
 +
 +      let amt_msat = 1000;
 +      let payment_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(),
 +              TEST_FINAL_CLTV, false);
 +      let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 +      let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
 +      let mut recipient_onion = RecipientOnionFields::spontaneous_empty()
 +              .with_custom_tlvs(vec![(414141, vec![42; 1200])]).unwrap();
 +      let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
 +      let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted");
 +      let keysend_preimage = PaymentPreimage([42; 32]);
 +      let payment_hash = PaymentHash(Sha256::hash(&keysend_preimage.0).to_byte_array());
 +
 +      let (onion_routing_packet, first_hop_msat, cltv_expiry) = onion_utils::create_payment_onion(
 +              &secp_ctx, &route.paths[0], &session_priv, amt_msat, recipient_onion.clone(),
 +              nodes[0].best_block_info().1, &payment_hash, &Some(keysend_preimage), prng_seed
 +      ).unwrap();
 +
 +      let update_add = msgs::UpdateAddHTLC {
 +              channel_id: ChannelId([0; 32]),
 +              htlc_id: 42,
 +              amount_msat: first_hop_msat,
 +              payment_hash,
 +              cltv_expiry,
 +              skimmed_fee_msat: None,
 +              onion_routing_packet,
 +              blinding_point: None,
 +      };
 +      let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
 +              &update_add, &&chanmon_cfgs[1].keys_manager, &&chanmon_cfgs[1].logger, &secp_ctx,
 +              nodes[1].best_block_info().1, true, false
 +      ).unwrap();
 +      assert_eq!(peeled_onion.incoming_amt_msat, Some(amt_msat));
 +      match peeled_onion.routing {
 +              PendingHTLCRouting::ReceiveKeysend {
 +                      payment_data, payment_metadata, custom_tlvs, ..
 +              } => {
 +                      #[cfg(not(c_bindings))]
 +                      assert_eq!(&custom_tlvs, recipient_onion.custom_tlvs());
 +                      #[cfg(c_bindings)]
 +                      assert_eq!(custom_tlvs, recipient_onion.custom_tlvs());
 +                      assert!(payment_metadata.is_none());
 +                      assert!(payment_data.is_none());
 +              },
 +              _ => panic!()
 +      }
 +}
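In the test above, the keysend payment hash is derived as SHA-256 of the 32-byte preimage the sender picked, which is how spontaneous payments bind the hash to the preimage. A standalone sketch of just that derivation, using the same bitcoin hashes API the file already imports (illustrative only, not part of the diff):

    use bitcoin::hashes::Hash;
    use bitcoin::hashes::sha256::Hash as Sha256;

    fn main() {
        // The sender chooses a random 32-byte preimage; the hash it pays to is SHA-256 of it.
        let preimage = [42u8; 32];
        let payment_hash: [u8; 32] = Sha256::hash(&preimage).to_byte_array();
        // Revealing the preimage later is the proof that the payment completed.
        println!("{}", payment_hash.iter().map(|b| format!("{:02x}", b)).collect::<String>());
    }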
index 664deb9fa1566f6aa2628150458385673fd16cc8,e23cce95f12cd0bbe50a793ea98d6d8487520a3d..5744c89357c13ef6c7329dfce21379f10efa2d9e
@@@ -30,9 -30,9 +30,9 @@@ use crate::ln::msgs::LightningError
  use crate::ln::script::ShutdownScript;
  use crate::offers::invoice::UnsignedBolt12Invoice;
  use crate::offers::invoice_request::UnsignedInvoiceRequest;
 -use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
 +use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
  use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
 -use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, Router, ScorerAccountingForInFlightHtlcs};
 +use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
  use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
  use crate::sync::RwLock;
  use crate::util::config::UserConfig;
@@@ -129,7 -129,6 +129,7 @@@ impl<'a> Router for TestRouter<'a> 
                                let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
                                for path in &route.paths {
                                        let mut aggregate_msat = 0u64;
 +                                      let mut prev_hop_node = payer;
                                        for (idx, hop) in path.hops.iter().rev().enumerate() {
                                                aggregate_msat += hop.fee_msat;
                                                let usage = ChannelUsage {
                                                        effective_capacity: EffectiveCapacity::Unknown,
                                                };
  
 -                                              // Since the path is reversed, the last element in our iteration is the first
 -                                              // hop.
                                                if idx == path.hops.len() - 1 {
 -                                                      let first_hops = match first_hops {
 -                                                              Some(hops) => hops,
 -                                                              None => continue,
 -                                                      };
 -                                                      if first_hops.len() == 0 {
 -                                                              continue;
 +                                                      if let Some(first_hops) = first_hops {
 +                                                              if let Some(idx) = first_hops.iter().position(|h| h.get_outbound_payment_scid() == Some(hop.short_channel_id)) {
 +                                                                      let node_id = NodeId::from_pubkey(payer);
 +                                                                      let candidate = CandidateRouteHop::FirstHop {
 +                                                                              details: first_hops[idx],
 +                                                                              payer_node_id: &node_id,
 +                                                                      };
 +                                                                      scorer.channel_penalty_msat(&candidate, usage, &());
 +                                                                      continue;
 +                                                              }
                                                        }
 -                                                      let idx = if first_hops.len() > 1 { route.paths.iter().position(|p| p == path).unwrap_or(0) } else { 0 };
 -                                                      let candidate = CandidateRouteHop::FirstHop {
 -                                                              details: first_hops[idx],
 -                                                              node_id: NodeId::from_pubkey(payer)
 +                                              }
 +                                              let network_graph = self.network_graph.read_only();
 +                                              if let Some(channel) = network_graph.channel(hop.short_channel_id) {
 +                                                      let (directed, _) = channel.as_directed_to(&NodeId::from_pubkey(&hop.pubkey)).unwrap();
 +                                                      let candidate = CandidateRouteHop::PublicHop {
 +                                                              info: directed,
 +                                                              short_channel_id: hop.short_channel_id,
                                                        };
                                                        scorer.channel_penalty_msat(&candidate, usage, &());
                                                } else {
 -                                                      let network_graph = self.network_graph.read_only();
 -                                                      let channel = match network_graph.channel(hop.short_channel_id) {
 -                                                              Some(channel) => channel,
 -                                                              None => continue,
 -                                                      };
 -                                                      let channel = match channel.as_directed_to(&NodeId::from_pubkey(&hop.pubkey)) {
 -                                                              Some(channel) => channel,
 -                                                              None => panic!("Channel directed to {} was not found", hop.pubkey),
 -                                                      };
 -                                                      let candidate = CandidateRouteHop::PublicHop {
 -                                                              info: channel.0,
 +                                                      let target_node_id = NodeId::from_pubkey(&hop.pubkey);
 +                                                      let route_hint = RouteHintHop {
 +                                                              src_node_id: *prev_hop_node,
                                                                short_channel_id: hop.short_channel_id,
 +                                                              fees: RoutingFees { base_msat: 0, proportional_millionths: 0 },
 +                                                              cltv_expiry_delta: 0,
 +                                                              htlc_minimum_msat: None,
 +                                                              htlc_maximum_msat: None,
 +                                                      };
 +                                                      let candidate = CandidateRouteHop::PrivateHop {
 +                                                              hint: &route_hint,
 +                                                              target_node_id: &target_node_id,
                                                        };
                                                        scorer.channel_penalty_msat(&candidate, usage, &());
                                                }
 +                                              prev_hop_node = &hop.pubkey;
                                        }
                                }
                        }
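The rewritten scoring loop above walks each path's hops in reverse, so the final element of the iteration is the payment's first hop: that hop is matched against the payer's own channel list (FirstHop), hops present in the network graph are scored as PublicHop, and anything else is scored as a PrivateHop built from a synthetic RouteHintHop whose source is the previously visited node. A tiny standalone sketch of the reversed-iteration bookkeeping (plain Rust, no LDK types):

    fn main() {
        // Hops in route order, payer -> recipient.
        let hops = ["payer->b", "b->c", "c->recipient"];
        for (idx, hop) in hops.iter().rev().enumerate() {
            // Because the iteration is reversed, the last index is the first hop.
            if idx == hops.len() - 1 {
                println!("{hop}: first hop, matched against the payer's channel list");
            } else {
                println!("{hop}: later hop, scored via the network graph or a route hint");
            }
        }
    }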
@@@ -234,7 -227,7 +234,7 @@@ pub struct TestChainMonitor<'a> 
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
        pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
        pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
-       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<TestChannelSigner>>,
+       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a dyn chainmonitor::Persist<TestChannelSigner>>,
        pub keys_manager: &'a TestKeysInterface,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
        /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
        pub expect_monitor_round_trip_fail: Mutex<Option<ChannelId>>,
  }
  impl<'a> TestChainMonitor<'a> {
-       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        monitor_updates: Mutex::new(HashMap::new()),
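The &'a dyn changes above spell the trait-object types out explicitly: bare trait objects without the dyn keyword have been deprecated since the 2018 edition and are a hard error in the 2021 edition. A minimal illustration of the two forms (not LDK code, trait and type names hypothetical):

    trait Broadcaster {
        fn broadcast_transaction(&self, tx_hex: &str);
    }

    struct PrintBroadcaster;
    impl Broadcaster for PrintBroadcaster {
        fn broadcast_transaction(&self, tx_hex: &str) {
            println!("broadcasting {tx_hex}");
        }
    }

    // `broadcaster: &Broadcaster` would warn on edition 2018 and fail to compile
    // on edition 2021; the explicit `&dyn Broadcaster` form is accepted everywhere.
    fn announce(broadcaster: &dyn Broadcaster, tx_hex: &str) {
        broadcaster.broadcast_transaction(tx_hex);
    }

    fn main() {
        announce(&PrintBroadcaster, "0200...00");
    }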
@@@ -1331,7 -1324,7 +1331,7 @@@ impl ScoreLookUp for TestScorer 
        fn channel_penalty_msat(
                &self, candidate: &CandidateRouteHop, usage: ChannelUsage, _score_params: &Self::ScoreParams
        ) -> u64 {
 -              let short_channel_id = match candidate.short_channel_id() {
 +              let short_channel_id = match candidate.globally_unique_short_channel_id() {
                        Some(scid) => scid,
                        None => return 0,
                };