Merge pull request #1857 from TheBlueMatt/2022-11-reload-htlc
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 5 Dec 2022 22:54:08 +0000 (22:54 +0000)
committer GitHub <noreply@github.com>
Mon, 5 Dec 2022 22:54:08 +0000 (22:54 +0000)
Fail HTLCs which were removed from a channel but not persisted

lightning/src/chain/channelmonitor.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/reload_tests.rs

index cc88a0119fc95cf972680e7fcb4e5eb63495130c,5f08c24b728b90fcd3ccb1ae722a312430aa3e95..b251e79f18cc38b8878b83346b86a422f2db8e08
@@@ -826,13 -826,6 +826,13 @@@ pub(crate) struct ChannelMonitorImpl<Si
        /// spending CSV for revocable outputs).
        htlcs_resolved_on_chain: Vec<IrrevocablyResolvedHTLC>,
  
 +      /// The set of `SpendableOutput` events which we have already passed upstream to be claimed.
 +      /// These are tracked explicitly to ensure that we don't generate the same events redundantly
 +      /// if users duplicatively confirm old transactions. Specifically for transactions claiming a
 +      /// revoked remote outpoint we otherwise have no tracking at all once they've reached
 +      /// [`ANTI_REORG_DELAY`], so we have to track them here.
 +      spendable_txids_confirmed: Vec<Txid>,
 +
        // We simply modify best_block in Channel's block_connected so that serialization is
        // consistent but hopefully the users' copy handles block_connected in a consistent way.
        // (we do *not*, however, update them in update_monitor to ensure any local user copies keep
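The new `spendable_txids_confirmed` field gives the monitor a durable record of which claiming transactions have already produced `SpendableOutputs` events, so a replayed confirmation is recognized and ignored rather than re-emitted. A minimal sketch of the dedup pattern this enables, standalone rather than the monitor's actual code:

    use bitcoin::Txid;

    struct SpendableEventDedup {
        spendable_txids_confirmed: Vec<Txid>,
    }

    impl SpendableEventDedup {
        /// Returns true only the first time `txid` is confirmed; users
        /// duplicatively confirming an old transaction get no second event.
        fn should_emit(&mut self, txid: Txid) -> bool {
            if self.spendable_txids_confirmed.contains(&txid) {
                return false;
            }
            self.spendable_txids_confirmed.push(txid);
            true
        }
    }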
@@@ -1078,7 -1071,6 +1078,7 @@@ impl<Signer: Sign> Writeable for Channe
                        (7, self.funding_spend_seen, required),
                        (9, self.counterparty_node_id, option),
                        (11, self.confirmed_commitment_tx_counterparty_output, option),
 +                      (13, self.spendable_txids_confirmed, vec_type),
                });
  
                Ok(())
@@@ -1187,7 -1179,6 +1187,7 @@@ impl<Signer: Sign> ChannelMonitor<Signe
                        funding_spend_confirmed: None,
                        confirmed_commitment_tx_counterparty_output: None,
                        htlcs_resolved_on_chain: Vec::new(),
 +                      spendable_txids_confirmed: Vec::new(),
  
                        best_block,
                        counterparty_node_id: Some(counterparty_node_id),
@@@ -1837,12 -1828,60 +1837,60 @@@ impl<Signer: Sign> ChannelMonitor<Signe
                res
        }
  
+       /// Gets the set of outbound HTLCs which can be (or have been) resolved by this
+       /// `ChannelMonitor`. This is used to determine if an HTLC was removed from the channel prior
+       /// to the `ChannelManager` having been persisted.
+       ///
+       /// This is similar to [`Self::get_pending_outbound_htlcs`] except it includes HTLCs which were
+       /// resolved by this `ChannelMonitor`.
+       pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
+               let mut res = HashMap::new();
+               // Just examine the available counterparty commitment transactions. See docs on
+               // `fail_unbroadcast_htlcs`, below, for justification.
+               let us = self.inner.lock().unwrap();
+               macro_rules! walk_counterparty_commitment {
+                       ($txid: expr) => {
+                               if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
+                                       for &(ref htlc, ref source_option) in latest_outpoints.iter() {
+                                               if let &Some(ref source) = source_option {
+                                                       res.insert((**source).clone(), htlc.clone());
+                                               }
+                                       }
+                               }
+                       }
+               }
+               if let Some(ref txid) = us.current_counterparty_commitment_txid {
+                       walk_counterparty_commitment!(txid);
+               }
+               if let Some(ref txid) = us.prev_counterparty_commitment_txid {
+                       walk_counterparty_commitment!(txid);
+               }
+               res
+       }
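On reload, the `ChannelManager` can compare this monitor-side view against the channel's in-flight set: an HTLC the monitor reports here but which the channel no longer considers pending was removed after the manager was last persisted, and needs to be failed back. A hedged sketch of that reconciliation (function and parameter names are hypothetical, not the actual reload code; std collections used for brevity):

    use std::collections::{HashMap, HashSet};

    fn removed_but_not_persisted(
        monitor_htlcs: &HashMap<HTLCSource, HTLCOutputInCommitment>,
        channel_inflight: &HashSet<HTLCSource>,
    ) -> Vec<HTLCSource> {
        // HTLCs the monitor knows of which the (stale) channel state lacks.
        monitor_htlcs.keys()
            .filter(|source| !channel_inflight.contains(*source))
            .cloned()
            .collect()
    }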
        /// Gets the set of outbound HTLCs which are pending resolution in this channel.
        /// This is used to reconstruct pending outbound payments on restart in the ChannelManager.
        pub(crate) fn get_pending_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
-               let mut res = HashMap::new();
                let us = self.inner.lock().unwrap();
+               // We're only concerned with the confirmation count of HTLC transactions, and don't
+               // actually care how many confirmations a commitment transaction may or may not have. Thus,
+               // we look for either a FundingSpendConfirmation event or a funding_spend_confirmed.
+               let confirmed_txid = us.funding_spend_confirmed.or_else(|| {
+                       us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+                               if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
+                                       Some(event.txid)
+                               } else { None }
+                       })
+               });
+               if confirmed_txid.is_none() {
+                       // If we have not seen a commitment transaction on-chain (ie the channel is not yet
+                       // closed), just get the full set.
+                       mem::drop(us);
+                       return self.get_all_current_outbound_htlcs();
+               }
  
+               let mut res = HashMap::new();
                macro_rules! walk_htlcs {
                        ($holder_commitment: expr, $htlc_iter: expr) => {
                                for (htlc, source) in $htlc_iter {
                        }
                }
  
-               // We're only concerned with the confirmation count of HTLC transactions, and don't
-               // actually care how many confirmations a commitment transaction may or may not have. Thus,
-               // we look for either a FundingSpendConfirmation event or a funding_spend_confirmed.
-               let confirmed_txid = us.funding_spend_confirmed.or_else(|| {
-                       us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                               if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
-                                       Some(event.txid)
+               let txid = confirmed_txid.unwrap();
+               if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
+                       walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().filter_map(|(a, b)| {
+                               if let &Some(ref source) = b {
+                                       Some((a, &**source))
                                } else { None }
-                       })
-               });
-               if let Some(txid) = confirmed_txid {
-                       if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
-                               walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().filter_map(|(a, b)| {
-                                       if let &Some(ref source) = b {
-                                               Some((a, &**source))
-                                       } else { None }
-                               }));
-                       } else if txid == us.current_holder_commitment_tx.txid {
-                               walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().filter_map(|(a, _, c)| {
+                       }));
+               } else if txid == us.current_holder_commitment_tx.txid {
+                       walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().filter_map(|(a, _, c)| {
+                               if let Some(source) = c { Some((a, source)) } else { None }
+                       }));
+               } else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
+                       if txid == prev_commitment.txid {
+                               walk_htlcs!(true, prev_commitment.htlc_outputs.iter().filter_map(|(a, _, c)| {
                                        if let Some(source) = c { Some((a, source)) } else { None }
                                }));
-                       } else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
-                               if txid == prev_commitment.txid {
-                                       walk_htlcs!(true, prev_commitment.htlc_outputs.iter().filter_map(|(a, _, c)| {
-                                               if let Some(source) = c { Some((a, source)) } else { None }
-                                       }));
-                               }
-                       }
-               } else {
-                       // If we have not seen a commitment transaction on-chain (ie the channel is not yet
-                       // closed), just examine the available counterparty commitment transactions. See docs
-                       // on `fail_unbroadcast_htlcs`, below, for justification.
-                       macro_rules! walk_counterparty_commitment {
-                               ($txid: expr) => {
-                                       if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
-                                               for &(ref htlc, ref source_option) in latest_outpoints.iter() {
-                                                       if let &Some(ref source) = source_option {
-                                                               res.insert((**source).clone(), htlc.clone());
-                                                       }
-                                               }
-                                       }
-                               }
-                       }
-                       if let Some(ref txid) = us.current_counterparty_commitment_txid {
-                               walk_counterparty_commitment!(txid);
-                       }
-                       if let Some(ref txid) = us.prev_counterparty_commitment_txid {
-                               walk_counterparty_commitment!(txid);
                        }
                }
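The rewrite hoists the confirmed-commitment lookup to the top of the function: if no commitment transaction has confirmed, the lock is dropped and the full set is fetched via `get_all_current_outbound_htlcs`, removing the previously duplicated counterparty-commitment walk. The fallback lookup in isolation (types simplified to stand-ins):

    // Prefer the irrevocably confirmed funding spend; otherwise take any
    // funding-spend confirmation still awaiting ANTI_REORG_DELAY.
    fn confirmed_txid(
        funding_spend_confirmed: Option<Txid>,
        awaiting_conf: &[(Txid, bool /* is_funding_spend */)],
    ) -> Option<Txid> {
        funding_spend_confirmed.or_else(|| {
            awaiting_conf.iter().find_map(|&(txid, is_funding_spend)| {
                if is_funding_spend { Some(txid) } else { None }
            })
        })
    }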
  
@@@ -2443,8 -2450,8 +2459,8 @@@ impl<Signer: Sign> ChannelMonitorImpl<S
                        let secret = self.get_secret(commitment_number).unwrap();
                        let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
                        let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
 -                      let revocation_pubkey = ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &self.holder_revocation_basepoint));
 -                      let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.counterparty_commitment_params.counterparty_delayed_payment_base_key));
 +                      let revocation_pubkey = chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &self.holder_revocation_basepoint);
 +                      let delayed_key = chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.counterparty_commitment_params.counterparty_delayed_payment_base_key);
  
                        let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.counterparty_commitment_params.on_counterparty_tx_csv, &delayed_key);
                        let revokeable_p2wsh = revokeable_redeemscript.to_v0_p2wsh();
                        } else { return (claimable_outpoints, to_counterparty_output_info); };
  
                if let Some(transaction) = tx {
 -                      let revokeable_p2wsh_opt =
 -                              if let Ok(revocation_pubkey) = chan_utils::derive_public_revocation_key(
 -                                      &self.secp_ctx, &per_commitment_point, &self.holder_revocation_basepoint)
 -                              {
 -                                      if let Ok(delayed_key) = chan_utils::derive_public_key(&self.secp_ctx,
 -                                              &per_commitment_point,
 -                                              &self.counterparty_commitment_params.counterparty_delayed_payment_base_key)
 -                                      {
 -                                              Some(chan_utils::get_revokeable_redeemscript(&revocation_pubkey,
 -                                                      self.counterparty_commitment_params.on_counterparty_tx_csv,
 -                                                      &delayed_key).to_v0_p2wsh())
 -                                      } else {
 -                                              debug_assert!(false, "Failed to derive a delayed payment key for a commitment state we accepted");
 -                                              None
 -                                      }
 -                              } else {
 -                                      debug_assert!(false, "Failed to derive a revocation pubkey key for a commitment state we accepted");
 -                                      None
 -                              };
 -                      if let Some(revokeable_p2wsh) = revokeable_p2wsh_opt {
 -                              for (idx, outp) in transaction.output.iter().enumerate() {
 -                                      if outp.script_pubkey == revokeable_p2wsh {
 -                                              to_counterparty_output_info =
 -                                                      Some((idx.try_into().expect("Can't have > 2^32 outputs"), outp.value));
 -                                      }
 +                      let revocation_pubkey = chan_utils::derive_public_revocation_key(
 +                              &self.secp_ctx, &per_commitment_point, &self.holder_revocation_basepoint);
 +                      let delayed_key = chan_utils::derive_public_key(&self.secp_ctx,
 +                              &per_commitment_point,
 +                              &self.counterparty_commitment_params.counterparty_delayed_payment_base_key);
 +                      let revokeable_p2wsh = chan_utils::get_revokeable_redeemscript(&revocation_pubkey,
 +                              self.counterparty_commitment_params.on_counterparty_tx_csv,
 +                              &delayed_key).to_v0_p2wsh();
 +                      for (idx, outp) in transaction.output.iter().enumerate() {
 +                              if outp.script_pubkey == revokeable_p2wsh {
 +                                      to_counterparty_output_info =
 +                                              Some((idx.try_into().expect("Can't have > 2^32 outputs"), outp.value));
                                }
                        }
                }
  
                let mut watch_outputs = Vec::new();
                let mut claimable_outpoints = Vec::new();
 -              for tx in &txn_matched {
 +              'tx_iter: for tx in &txn_matched {
 +                      let txid = tx.txid();
 +                      // If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
 +                      if Some(txid) == self.funding_spend_confirmed {
 +                              log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
 +                              continue 'tx_iter;
 +                      }
 +                      for ev in self.onchain_events_awaiting_threshold_conf.iter() {
 +                              if ev.txid == txid {
 +                                      if let Some(conf_hash) = ev.block_hash {
 +                                              assert_eq!(header.block_hash(), conf_hash,
 +                                                      "Transaction {} was already confirmed and is being re-confirmed in a different block.\n\
 +                                                      This indicates a severe bug in the transaction connection logic - a reorg should have been processed first!", ev.txid);
 +                                      }
 +                                      log_debug!(logger, "Skipping redundant processing of confirming tx {} as it was previously confirmed", txid);
 +                                      continue 'tx_iter;
 +                              }
 +                      }
 +                      for htlc in self.htlcs_resolved_on_chain.iter() {
 +                              if Some(txid) == htlc.resolving_txid {
 +                                      log_debug!(logger, "Skipping redundant processing of HTLC resolution tx {} as it was previously confirmed", txid);
 +                                      continue 'tx_iter;
 +                              }
 +                      }
 +                      for spendable_txid in self.spendable_txids_confirmed.iter() {
 +                              if txid == *spendable_txid {
 +                                      log_debug!(logger, "Skipping redundant processing of spendable tx {} as it was previously confirmed", txid);
 +                                      continue 'tx_iter;
 +                              }
 +                      }
 +
                        if tx.input.len() == 1 {
                                // Assuming our keys were not leaked (in which case we're screwed no matter what),
                                // commitment transactions and HTLC transactions will all only ever have one input,
                                if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
                                        let mut balance_spendable_csv = None;
                                        log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
 -                                              log_bytes!(self.funding_info.0.to_channel_id()), tx.txid());
 +                                              log_bytes!(self.funding_info.0.to_channel_id()), txid);
                                        self.funding_spend_seen = true;
                                        let mut commitment_tx_to_counterparty_output = None;
                                        if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 {
                                                        }
                                                }
                                        }
 -                                      let txid = tx.txid();
                                        self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
                                                txid,
                                                transaction: Some((*tx).clone()),
                                        self.pending_events.push(Event::SpendableOutputs {
                                                outputs: vec![descriptor]
                                        });
 +                                      self.spendable_txids_confirmed.push(entry.txid);
                                },
                                OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
                                        self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
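The `'tx_iter` label in the hunk above lets each of the four redundancy checks skip straight to the next matched transaction from inside a nested loop. The shape of that pattern, reduced to its essentials:

    fn process_confirmed(txs: &[Txid], already_confirmed: &[Txid]) {
        'tx_iter: for txid in txs {
            for seen in already_confirmed {
                if txid == seen {
                    // Previously confirmed: skip this tx entirely.
                    continue 'tx_iter;
                }
            }
            // ... handle a transaction seen for the first time ...
        }
    }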
@@@ -3803,7 -3793,6 +3819,7 @@@ impl<'a, K: KeysInterface> ReadableArgs
                let mut funding_spend_seen = Some(false);
                let mut counterparty_node_id = None;
                let mut confirmed_commitment_tx_counterparty_output = None;
 +              let mut spendable_txids_confirmed = Some(Vec::new());
                read_tlv_fields!(reader, {
                        (1, funding_spend_confirmed, option),
                        (3, htlcs_resolved_on_chain, vec_type),
                        (7, funding_spend_seen, option),
                        (9, counterparty_node_id, option),
                        (11, confirmed_commitment_tx_counterparty_output, option),
 +                      (13, spendable_txids_confirmed, vec_type),
                });
  
                let mut secp_ctx = Secp256k1::new();
                        funding_spend_confirmed,
                        confirmed_commitment_tx_counterparty_output,
                        htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
 +                      spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),
  
                        best_block,
                        counterparty_node_id,
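Backwards compatibility on the read side: the new TLV (type 13, odd, therefore optional to readers) is pre-seeded with `Some(Vec::new())`, so monitors serialized by older LDK versions deserialize with an empty set and the later `.unwrap()` cannot panic. The defaulting idiom in miniature (with `read_tlv_fields!` internals elided):

    let mut spendable_txids_confirmed: Option<Vec<Txid>> = Some(Vec::new());
    // read_tlv_fields! overwrites this Option only when TLV type 13 is
    // present, so both old and new serializations end up with Some(..).
    let spendable_txids_confirmed: Vec<Txid> = spendable_txids_confirmed.unwrap();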
@@@ -4073,7 -4060,7 +4089,7 @@@ mod tests 
                        SecretKey::from_slice(&[41; 32]).unwrap(),
                        [41; 32],
                        0,
 -                      [0; 32]
 +                      [0; 32],
                );
  
                let counterparty_pubkeys = ChannelPublicKeys {
                        }),
                        funding_outpoint: Some(funding_outpoint),
                        opt_anchors: None,
 +                      opt_non_zero_fee_anchors: None,
                };
                // Prune with one old state and a holder commitment tx holding a few overlaps with the
                // old state.
index dcbaa771f93e1f1798dccb0d7c8eea47a0d80498,561b398b91d0b4321d75d6c0851a4bcfa77ece48..6825d6f8d9479b1f7067b0bebfb4242aa4162ace
@@@ -439,6 -439,8 +439,6 @@@ pub(super) struct ReestablishResponses 
        pub raa: Option<msgs::RevokeAndACK>,
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub order: RAACommitmentOrder,
 -      pub mon_update: Option<ChannelMonitorUpdate>,
 -      pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
        pub shutdown_msg: Option<msgs::Shutdown>,
  }
@@@ -1038,7 -1040,6 +1038,7 @@@ impl<Signer: Sign> Channel<Signer> 
                                counterparty_parameters: None,
                                funding_outpoint: None,
                                opt_anchors: if opt_anchors { Some(()) } else { None },
 +                              opt_non_zero_fee_anchors: None
                        },
                        funding_transaction: None,
  
                                }),
                                funding_outpoint: None,
                                opt_anchors: if opt_anchors { Some(()) } else { None },
 +                              opt_non_zero_fee_anchors: None
                        },
                        funding_transaction: None,
  
        /// our counterparty!)
        /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
        /// TODO Some magic rust shit to compile-time check this?
 -      fn build_holder_transaction_keys(&self, commitment_number: u64) -> Result<TxCreationKeys, ChannelError> {
 +      fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
                let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx);
                let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
                let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
                let counterparty_pubkeys = self.get_counterparty_pubkeys();
  
 -              Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned()))
 +              TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
        }
  
        #[inline]
        /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
        /// will sign and send to our counterparty.
        /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
 -      fn build_remote_transaction_keys(&self) -> Result<TxCreationKeys, ChannelError> {
 +      fn build_remote_transaction_keys(&self) -> TxCreationKeys {
                //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
                //may see payments to it!
                let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
                let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
                let counterparty_pubkeys = self.get_counterparty_pubkeys();
  
 -              Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned()))
 +              TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
        }
  
        /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
        fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
                let funding_script = self.get_funding_redeemscript();
  
 -              let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
 +              let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
                let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
                {
                        let trusted_tx = initial_commitment_tx.trust();
                        secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
                }
  
 -              let counterparty_keys = self.build_remote_transaction_keys()?;
 +              let counterparty_keys = self.build_remote_transaction_keys();
                let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
  
                let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
  
                let funding_script = self.get_funding_redeemscript();
  
 -              let counterparty_keys = self.build_remote_transaction_keys()?;
 +              let counterparty_keys = self.build_remote_transaction_keys();
                let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
                let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
                        log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
  
 -              let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
 +              let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
                let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
                {
                        let trusted_tx = initial_commitment_tx.trust();
  
                let funding_script = self.get_funding_redeemscript();
  
 -              let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number).map_err(|e| (None, e))?;
 +              let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
  
                let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger);
                let commitment_txid = {
                        if let Some(_) = htlc.transaction_output_index {
                                let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
                                        self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(),
 -                                      &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 +                                      false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
  
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys);
                                let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
                // Before proposing a feerate update, check that we can actually afford the new fee.
                let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
                let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
 -              let keys = if let Ok(keys) = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number) { keys } else { return None; };
 +              let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
                let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger);
                let buffer_fee_msat = Channel::<Signer>::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.opt_anchors()) * 1000;
                let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
                                // Short circuit the whole handler as there is nothing we can resend them
                                return Ok(ReestablishResponses {
                                        channel_ready: None,
 -                                      raa: None, commitment_update: None, mon_update: None,
 +                                      raa: None, commitment_update: None,
                                        order: RAACommitmentOrder::CommitmentFirst,
 -                                      holding_cell_failed_htlcs: Vec::new(),
                                        shutdown_msg, announcement_sigs,
                                });
                        }
                                        next_per_commitment_point,
                                        short_channel_id_alias: Some(self.outbound_scid_alias),
                                }),
 -                              raa: None, commitment_update: None, mon_update: None,
 +                              raa: None, commitment_update: None,
                                order: RAACommitmentOrder::CommitmentFirst,
 -                              holding_cell_failed_htlcs: Vec::new(),
                                shutdown_msg, announcement_sigs,
                        });
                }
                                log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
                        }
  
 -                      if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
 -                              // We're up-to-date and not waiting on a remote revoke (if we are our
 -                              // channel_reestablish should result in them sending a revoke_and_ack), but we may
 -                              // have received some updates while we were disconnected. Free the holding cell
 -                              // now!
 -                              match self.free_holding_cell_htlcs(logger) {
 -                                      Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
 -                                      Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
 -                                              panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
 -                                      Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
 -                                              Ok(ReestablishResponses {
 -                                                      channel_ready, shutdown_msg, announcement_sigs,
 -                                                      raa: required_revoke,
 -                                                      commitment_update: Some(commitment_update),
 -                                                      order: self.resend_order.clone(),
 -                                                      mon_update: Some(monitor_update),
 -                                                      holding_cell_failed_htlcs,
 -                                              })
 -                                      },
 -                                      Ok((None, holding_cell_failed_htlcs)) => {
 -                                              Ok(ReestablishResponses {
 -                                                      channel_ready, shutdown_msg, announcement_sigs,
 -                                                      raa: required_revoke,
 -                                                      commitment_update: None,
 -                                                      order: self.resend_order.clone(),
 -                                                      mon_update: None,
 -                                                      holding_cell_failed_htlcs,
 -                                              })
 -                                      },
 -                              }
 -                      } else {
 -                              Ok(ReestablishResponses {
 -                                      channel_ready, shutdown_msg, announcement_sigs,
 -                                      raa: required_revoke,
 -                                      commitment_update: None,
 -                                      order: self.resend_order.clone(),
 -                                      mon_update: None,
 -                                      holding_cell_failed_htlcs: Vec::new(),
 -                              })
 -                      }
 +                      Ok(ReestablishResponses {
 +                              channel_ready, shutdown_msg, announcement_sigs,
 +                              raa: required_revoke,
 +                              commitment_update: None,
 +                              order: self.resend_order.clone(),
 +                      })
                } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
                        if required_revoke.is_some() {
                                log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
                                self.monitor_pending_commitment_signed = true;
                                Ok(ReestablishResponses {
                                        channel_ready, shutdown_msg, announcement_sigs,
 -                                      commitment_update: None, raa: None, mon_update: None,
 +                                      commitment_update: None, raa: None,
                                        order: self.resend_order.clone(),
 -                                      holding_cell_failed_htlcs: Vec::new(),
                                })
                        } else {
                                Ok(ReestablishResponses {
                                        raa: required_revoke,
                                        commitment_update: Some(self.get_last_commitment_update(logger)),
                                        order: self.resend_order.clone(),
 -                                      mon_update: None,
 -                                      holding_cell_failed_htlcs: Vec::new(),
                                })
                        }
                } else {
                self.funding_tx_confirmed_in
        }
  
 +      /// Returns the current number of confirmations on the funding transaction.
 +      pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
 +              if self.funding_tx_confirmation_height == 0 {
 +                      // We either haven't seen any confirmation yet, or observed a reorg.
 +                      return 0;
 +              }
 +
 +              height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
 +      }
 +
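Worked example of the arithmetic above: a funding transaction confirmed at height 100 has one confirmation when the tip is also at height 100, so a tip of 105 yields 105 - 100 + 1 = 6, and `checked_sub` maps a cached confirmation height above the tip (possible mid-reorg) to zero:

    assert_eq!(105u32.checked_sub(100).map_or(0, |c| c + 1), 6);
    assert_eq!(100u32.checked_sub(100).map_or(0, |c| c + 1), 1);
    assert_eq!(99u32.checked_sub(100).map_or(0, |c| c + 1), 0);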
        fn get_holder_selected_contest_delay(&self) -> u16 {
                self.channel_transaction_parameters.holder_selected_contest_delay
        }
  
        /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
        fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
 -              let counterparty_keys = self.build_remote_transaction_keys()?;
 +              let counterparty_keys = self.build_remote_transaction_keys();
                let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
                Ok(self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
                                .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
                        return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat)));
                }
  
 -              let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
 +              let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
                let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger);
                if !self.is_outbound() {
                        // Check that we won't violate the remote channel reserve by adding this HTLC.
        /// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
        /// when we shouldn't change HTLC/channel state.
        fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
 -              let counterparty_keys = self.build_remote_transaction_keys()?;
 +              let counterparty_keys = self.build_remote_transaction_keys();
                let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
                let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
                let (signature, htlc_signatures);
  
                        for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
                                log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
 -                                      encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
 +                                      encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
                                        encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &counterparty_keys)),
                                        log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
                                        log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
                (monitor_update, dropped_outbound_htlcs)
        }
  
-       pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=&HTLCSource> {
+       pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
                self.holding_cell_htlc_updates.iter()
                        .flat_map(|htlc_update| {
                                match htlc_update {
-                                       HTLCUpdateAwaitingACK::AddHTLC { source, .. } => { Some(source) }
-                                       _ => None
+                                       HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
+                                               => Some((source, payment_hash)),
+                                       _ => None,
                                }
                        })
-                       .chain(self.pending_outbound_htlcs.iter().map(|htlc| &htlc.source))
+                       .chain(self.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
        }
  }
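Carrying the `PaymentHash` alongside each `HTLCSource` lets callers group a channel's in-flight HTLCs by payment without a second lookup. A hypothetical caller, not the actual `ChannelManager` usage:

    // Count in-flight HTLCs per payment across one channel.
    let mut per_payment: HashMap<PaymentHash, usize> = HashMap::new();
    for (_source, payment_hash) in channel.inflight_htlc_sources() {
        *per_payment.entry(*payment_hash).or_insert(0) += 1;
    }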
  
@@@ -7262,7 -7292,7 +7263,7 @@@ mod tests 
                        // These aren't set in the test vectors:
                        [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
                        10_000_000,
 -                      [0; 32]
 +                      [0; 32],
                );
  
                assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
                let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
                let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
                let htlc_basepoint = &chan.holder_signer.pubkeys().htlc_basepoint;
 -              let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint).unwrap();
 +              let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
  
                macro_rules! test_commitment {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
                                        let ref htlc = htlcs[$htlc_idx];
                                        let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.feerate_per_kw,
                                                chan.get_counterparty_selected_contest_delay().unwrap(),
 -                                              &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 +                                              &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
                                        let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
                                        let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
                let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
                assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
  
 -              assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).unwrap().serialize()[..],
 +              assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
                                hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
  
 -              assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret).unwrap(),
 +              assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
                                SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
  
 -              assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).unwrap().serialize()[..],
 +              assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
                                hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
  
 -              assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret).unwrap(),
 +              assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
                                SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
        }
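The assertion changes above follow from key derivation having been made infallible upstream: `derive_public_key`, `derive_private_key`, and the revocation variants now return keys directly instead of `Result`, since the underlying secp256k1 tweak-add fails only with negligible probability. Call sites shed their error handling accordingly:

    // Before: chan_utils::derive_public_key(&secp_ctx, &point, &base).unwrap()
    // After:  chan_utils::derive_public_key(&secp_ctx, &point, &base)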
  
index 54d1aeab3f4064999a4513be81d6c93685264105,7f65719a57ff3a531ad0c648dea0973b4726c3df..29553d17ffb3ac2dee788f59c2de7f5bb9a814c1
@@@ -92,8 -92,8 +92,8 @@@ use core::ops::Deref
  pub(super) enum PendingHTLCRouting {
        Forward {
                onion_packet: msgs::OnionPacket,
 -              /// The SCID from the onion that we should forward to. This could be a "real" SCID, an
 -              /// outbound SCID alias, or a phantom node SCID.
 +              /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
 +              /// generated using `get_fake_scid` from the scid_utils::fake_scid module.
                short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
        },
        Receive {
@@@ -142,7 -142,6 +142,7 @@@ pub(super) struct PendingAddHTLCInfo 
        prev_short_channel_id: u64,
        prev_htlc_id: u64,
        prev_funding_outpoint: OutPoint,
 +      prev_user_channel_id: u128,
  }
  
  pub(super) enum HTLCForwardInfo {
@@@ -207,24 -206,6 +207,24 @@@ impl Readable for PaymentId 
                Ok(PaymentId(buf))
        }
  }
 +
 +/// An identifier used to uniquely identify an intercepted HTLC to LDK.
 +/// (C-not exported) as we just use [u8; 32] directly
 +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 +pub struct InterceptId(pub [u8; 32]);
 +
 +impl Writeable for InterceptId {
 +      fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
 +              self.0.write(w)
 +      }
 +}
 +
 +impl Readable for InterceptId {
 +      fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
 +              let buf: [u8; 32] = Readable::read(r)?;
 +              Ok(InterceptId(buf))
 +      }
 +}
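Both impls delegate to the inner `[u8; 32]`, so an `InterceptId` round-trips as exactly 32 bytes. A test-style sketch of that round-trip (assuming the `encode` helper and slice-based `Readable::read` from LDK's ser module):

    let id = InterceptId([42; 32]);
    let bytes = id.encode();
    assert_eq!(bytes.len(), 32);
    let decoded: InterceptId = Readable::read(&mut &bytes[..]).unwrap();
    assert_eq!(decoded, id);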
  /// Tracks the inbound corresponding to an outbound HTLC
  #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
  #[derive(Clone, PartialEq, Eq)]
@@@ -287,16 -268,6 +287,16 @@@ pub(super) enum HTLCFailReason 
        }
  }
  
 +impl HTLCFailReason {
 +      pub(super) fn reason(failure_code: u16, data: Vec<u8>) -> Self {
 +              Self::Reason { failure_code, data }
 +      }
 +
 +      pub(super) fn from_failure_code(failure_code: u16) -> Self {
 +              Self::Reason { failure_code, data: Vec::new() }
 +      }
 +}
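The two constructors cover the two shapes of `HTLCFailReason::Reason`: failure codes that carry additional onion data, and bare codes. For example (codes per BOLT 4; `chan_update_bytes` is a hypothetical serialized `channel_update`):

    // UPDATE|7 = temporary_channel_failure, with the channel_update attached.
    let with_data = HTLCFailReason::reason(0x1000 | 7, chan_update_bytes);
    // PERM|10 = unknown_next_peer, which carries no extra data.
    let bare = HTLCFailReason::from_failure_code(0x4000 | 10);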
 +
  struct ReceiveError {
        err_code: u16,
        err_data: Vec<u8>,
@@@ -427,6 -398,13 +427,6 @@@ pub(super) enum RAACommitmentOrder 
  // Note this is only exposed in cfg(test):
  pub(super) struct ChannelHolder<Signer: Sign> {
        pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
 -      /// Map from payment hash to the payment data and any HTLCs which are to us and can be
 -      /// failed/claimed by the user.
 -      ///
 -      /// Note that while this is held in the same mutex as the channels themselves, no consistency
 -      /// guarantees are made about the channels given here actually existing anymore by the time you
 -      /// go to read them!
 -      claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@@ -694,24 -672,20 +694,24 @@@ pub type SimpleRefChannelManager<'a, 'b
  // `total_consistency_lock`
  //  |
  //  |__`forward_htlcs`
 -//  |
 -//  |__`channel_state`
  //  |   |
 -//  |   |__`id_to_peer`
 +//  |   |__`pending_intercepted_htlcs`
 +//  |
 +//  |__`pending_inbound_payments`
  //  |   |
 -//  |   |__`short_to_chan_info`
 +//  |   |__`claimable_htlcs`
  //  |   |
 -//  |   |__`per_peer_state`
 +//  |   |__`pending_outbound_payments`
  //  |       |
 -//  |       |__`outbound_scid_aliases`
 -//  |       |
 -//  |       |__`pending_inbound_payments`
 +//  |       |__`channel_state`
 +//  |           |
 +//  |           |__`id_to_peer`
  //  |           |
 -//  |           |__`pending_outbound_payments`
 +//  |           |__`short_to_chan_info`
 +//  |           |
 +//  |           |__`per_peer_state`
 +//  |               |
 +//  |               |__`outbound_scid_aliases`
  //  |               |
  //  |               |__`best_block`
  //  |               |
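The tree encodes a strict acquisition order: while holding a lock, a thread may only take locks that appear below it on the same branch, never an ancestor or an unrelated branch, which rules out lock-order inversions. For instance, inside a `ChannelManager` method:

    // OK: parent before child, per the tree above.
    let _pending = self.pending_inbound_payments.lock().unwrap();
    let _claimable = self.claimable_htlcs.lock().unwrap();
    // Not OK: additionally taking `forward_htlcs` here, since it sits on a
    // different branch and is not below `claimable_htlcs` in the tree.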
@@@ -746,9 -720,9 +746,9 @@@ pub struct ChannelManager<M: Deref, T: 
        channel_state: Mutex<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
  
        /// Storage for PaymentSecrets and any requirements on future inbound payments before we will
 -      /// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
 +      /// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements
        /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
 -      /// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
 +      /// after we generate a PaymentClaimable upon receipt of all MPP parts or when they time out.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
        pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
        #[cfg(not(test))]
        forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
 +      /// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
 +      /// until the user tells us what we should do with them.
 +      ///
 +      /// See `ChannelManager` struct-level documentation for lock order requirements.
 +      pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
 +
 +      /// Map from payment hash to the payment data and any HTLCs which are to us and can be
 +      /// failed/claimed by the user.
 +      ///
 +      /// Note that no consistency guarantees are made about the channels given here actually
 +      /// existing anymore by the time you go to read them!
 +      ///
 +      /// See `ChannelManager` struct-level documentation for lock order requirements.
 +      claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
  
        /// The set of outbound SCID aliases across all our channels, including unconfirmed channels
        /// and some closed channels which reached a usable state prior to being closed. This is used
@@@ -1181,10 -1141,6 +1181,10 @@@ pub struct ChannelDetails 
        /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
        /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
        pub confirmations_required: Option<u32>,
 +      /// The current number of confirmations on the funding transaction.
 +      ///
 +      /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113.
 +      pub confirmations: Option<u32>,
        /// The number of blocks (after our commitment transaction confirms) that we will need to wait
        /// until we can claim our funds after we force-close the channel. During this time our
        /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
@@@ -1561,6 -1517,134 +1561,6 @@@ macro_rules! emit_channel_ready_event 
        }
  }
  
 -macro_rules! handle_chan_restoration_locked {
 -      ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
 -       $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
 -       $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
 -              let mut htlc_forwards = None;
 -
 -              let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
 -              let chanmon_update_is_none = chanmon_update.is_none();
 -              let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
 -              let res = loop {
 -                      let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
 -                      if !forwards.is_empty() {
 -                              htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
 -                                      $channel_entry.get().get_funding_txo().unwrap(), forwards));
 -                      }
 -
 -                      if chanmon_update.is_some() {
 -                              // On reconnect, we, by definition, only resend a channel_ready if there have been
 -                              // no commitment updates, so the only channel monitor update which could also be
 -                              // associated with a channel_ready would be the funding_created/funding_signed
 -                              // monitor update. That monitor update failing implies that we won't send
 -                              // channel_ready until it's been updated, so we can't have a channel_ready and a
 -                              // monitor update here (so we don't bother to handle it correctly below).
 -                              assert!($channel_ready.is_none());
 -                              // A channel monitor update makes no sense without either a channel_ready or a
 -                              // commitment update to process after it. Since we can't have a channel_ready, we
 -                              // only bother to handle the monitor-update + commitment_update case below.
 -                              assert!($commitment_update.is_some());
 -                      }
 -
 -                      if let Some(msg) = $channel_ready {
 -                              // Similar to the above, this implies that we're letting the channel_ready fly
 -                              // before it should be allowed to.
 -                              assert!(chanmon_update.is_none());
 -                              send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
 -                      }
 -                      if let Some(msg) = $announcement_sigs {
 -                              $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 -                                      node_id: counterparty_node_id,
 -                                      msg,
 -                              });
 -                      }
 -
 -                      emit_channel_ready_event!($self, $channel_entry.get_mut());
 -
 -                      let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
 -                      if let Some(monitor_update) = chanmon_update {
 -                              // We only ever broadcast a funding transaction in response to a funding_signed
 -                              // message and the resulting monitor update. Thus, on channel_reestablish
 -                              // message handling we can't have a funding transaction to broadcast. When
 -                              // processing a monitor update finishing resulting in a funding broadcast, we
 -                              // cannot have a second monitor update, thus this case would indicate a bug.
 -                              assert!(funding_broadcastable.is_none());
 -                              // Given we were just reconnected or finished updating a channel monitor, the
 -                              // only case where we can get a new ChannelMonitorUpdate would be if we also
 -                              // have some commitment updates to send as well.
 -                              assert!($commitment_update.is_some());
 -                              match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
 -                                      ChannelMonitorUpdateStatus::Completed => {},
 -                                      e => {
 -                                              // channel_reestablish doesn't guarantee the order it returns is sensical
 -                                              // for the messages it returns, but if we're setting what messages to
 -                                              // re-transmit on monitor update success, we need to make sure it is sane.
 -                                              let mut order = $order;
 -                                              if $raa.is_none() {
 -                                                      order = RAACommitmentOrder::CommitmentFirst;
 -                                              }
 -                                              break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
 -                                      }
 -                              }
 -                      }
 -
 -                      macro_rules! handle_cs { () => {
 -                              if let Some(update) = $commitment_update {
 -                                      $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 -                                              node_id: counterparty_node_id,
 -                                              updates: update,
 -                                      });
 -                              }
 -                      } }
 -                      macro_rules! handle_raa { () => {
 -                              if let Some(revoke_and_ack) = $raa {
 -                                      $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
 -                                              node_id: counterparty_node_id,
 -                                              msg: revoke_and_ack,
 -                                      });
 -                              }
 -                      } }
 -                      match $order {
 -                              RAACommitmentOrder::CommitmentFirst => {
 -                                      handle_cs!();
 -                                      handle_raa!();
 -                              },
 -                              RAACommitmentOrder::RevokeAndACKFirst => {
 -                                      handle_raa!();
 -                                      handle_cs!();
 -                              },
 -                      }
 -                      if let Some(tx) = funding_broadcastable {
 -                              log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
 -                              $self.tx_broadcaster.broadcast_transaction(&tx);
 -                      }
 -                      break Ok(());
 -              };
 -
 -              if chanmon_update_is_none {
 -                      // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
 -                      // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
 -                      // should *never* end up calling back to `chain_monitor.update_channel()`.
 -                      assert!(res.is_ok());
 -              }
 -
 -              (htlc_forwards, res, counterparty_node_id)
 -      } }
 -}
 -
 -macro_rules! post_handle_chan_restoration {
 -      ($self: ident, $locked_res: expr) => { {
 -              let (htlc_forwards, res, counterparty_node_id) = $locked_res;
 -
 -              let _ = handle_error!($self, res, counterparty_node_id);
 -
 -              if let Some(forwards) = htlc_forwards {
 -                      $self.forward_htlcs(&mut [forwards][..]);
 -              }
 -      } }
 -}
 -
  impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
        where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
          T::Target: BroadcasterInterface,
  
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
 -                              claimable_htlcs: HashMap::new(),
                                pending_msg_events: Vec::new(),
                        }),
                        outbound_scid_aliases: Mutex::new(HashSet::new()),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
                        pending_outbound_payments: Mutex::new(HashMap::new()),
                        forward_htlcs: Mutex::new(HashMap::new()),
 +                      claimable_htlcs: Mutex::new(HashMap::new()),
 +                      pending_intercepted_htlcs: Mutex::new(HashMap::new()),
                        id_to_peer: Mutex::new(HashMap::new()),
                        short_to_chan_info: FairRwLock::new(HashMap::new()),
  
                let mut res = Vec::new();
                {
                        let channel_state = self.channel_state.lock().unwrap();
 +                      let best_block_height = self.best_block.read().unwrap().height();
                        res.reserve(channel_state.by_id.len());
                        for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
                                let balance = channel.get_available_balances();
                                        next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
                                        user_channel_id: channel.get_user_id(),
                                        confirmations_required: channel.minimum_depth(),
 +                                      confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
                                        force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
                                        is_channel_ready: channel.is_usable(),
                                        if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
                                                return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                        }
 -                                      let per_peer_state = self.per_peer_state.read().unwrap();
 -                                      let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
 -                                              Some(peer_state) => {
 -                                                      let peer_state = peer_state.lock().unwrap();
 -                                                      let their_features = &peer_state.latest_features;
 -                                                      chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
 -                                              },
 -                                              None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
 +                                      let (shutdown_msg, monitor_update, htlcs) = {
 +                                              let per_peer_state = self.per_peer_state.read().unwrap();
 +                                              match per_peer_state.get(&counterparty_node_id) {
 +                                                      Some(peer_state) => {
 +                                                              let peer_state = peer_state.lock().unwrap();
 +                                                              let their_features = &peer_state.latest_features;
 +                                                              chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
 +                                                      },
 +                                                      None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
 +                                              }
                                        };
                                        failed_htlcs = htlcs;
  
                };
  
                for htlc_source in failed_htlcs.drain(..) {
 +                      let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
 -                      self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
 +                      self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
  
                let _ = handle_error!(self, result, *counterparty_node_id);
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 +                      let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 -                      self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
 +                      self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
                if let Some((funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
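
For reference while reading the failure-code changes throughout this patch: the magic numbers passed to `HTLCFailReason::from_failure_code` and `HTLCFailReason::reason` are BOLT 4 onion failure codes. A sketch of the mapping (annotation only, not LDK source):

    const PERM: u16 = 0x4000;
    const UPDATE: u16 = 0x1000;
    const PERMANENT_CHANNEL_FAILURE: u16 = PERM | 8;              // 0x4000|8
    const UNKNOWN_NEXT_PEER: u16 = PERM | 10;                     // 0x4000|10
    const INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS: u16 = PERM | 15;  // 0x4000|15
    const TEMPORARY_CHANNEL_FAILURE: u16 = UPDATE | 7;            // 0x1000|7
    const MPP_TIMEOUT: u16 = 23;
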
                                        let forwarding_id_opt = match id_option {
                                                None => { // unknown_next_peer
                                                        // Note that this is likely a timing oracle for detecting whether an scid is a
 -                                                      // phantom.
 -                                                      if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) {
 +                                                      // phantom or an intercept.
 +                                                      if (self.default_configuration.accept_intercept_htlcs &&
 +                                                         fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
 +                                                         fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
 +                                                      {
                                                                None
                                                        } else {
                                                                break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
                let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
  
                let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
 -                      .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
 +                      .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected"})?;
                let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
                if onion_utils::route_size_insane(&onion_payloads) {
 -                      return Err(APIError::RouteError{err: "Route size too large considering onion data"});
 +                      return Err(APIError::InvalidRoute{err: "Route size too large considering onion data"});
                }
                let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
  
                        if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
                                match {
                                        if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey {
 -                                              return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
 +                                              return Err(APIError::InvalidRoute{err: "Node ID mismatch on first hop!"});
                                        }
                                        if !chan.get().is_live() {
                                                return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
        /// fields for more info.
        ///
        /// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
 -      /// method will error with an [`APIError::RouteError`]. Note, however, that once a payment
 +      /// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment
        /// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
        /// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
        /// [`PaymentId`].
        /// PaymentSendFailure for more info.
        ///
        /// In general, a path may raise:
 -      ///  * [`APIError::RouteError`] when an invalid route or forwarding parameter (cltv_delta, fee,
 +      ///  * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
        ///    node public key) is specified.
        ///  * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
        ///    (including due to previous monitor update failure or new permanent monitor update
  
        fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
                if route.paths.len() < 1 {
 -                      return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
 +                      return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over"}));
                }
                if payment_secret.is_none() && route.paths.len() > 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
                let mut path_errs = Vec::with_capacity(route.paths.len());
                'path_check: for path in route.paths.iter() {
                        if path.len() < 1 || path.len() > 20 {
 -                              path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
 +                              path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size"}));
                                continue 'path_check;
                        }
                        for (idx, hop) in path.iter().enumerate() {
                                if idx != path.len() - 1 && hop.pubkey == our_node_id {
 -                                      path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"}));
 +                                      path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us"}));
                                        continue 'path_check;
                                }
                        }
                Ok(())
        }
  
 +      /// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
 +      /// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
 +      ///
 +      /// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time
 +      /// channel to a receiving node if the node lacks sufficient inbound liquidity.
 +      ///
 +      /// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use
 +      /// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the
 +      /// receiver's invoice route hints. These route hints will signal to LDK to generate an
 +      /// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or
 +      /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event.
 +      ///
 +      /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop
 +      /// you from forwarding more than you received.
 +      ///
 +      /// Errors if the event was not handled in time, in which case the HTLC was automatically failed
 +      /// backwards.
 +      ///
 +      /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
 +      /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
 +      // TODO: when we move to deciding the best outbound channel at forward time, only take
 +      // `next_node_id` and not `next_hop_channel_id`
 +      pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], _next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 +
 +              let next_hop_scid = match self.channel_state.lock().unwrap().by_id.get(next_hop_channel_id) {
 +                      Some(chan) => {
 +                              if !chan.is_usable() {
 +                                      return Err(APIError::ChannelUnavailable {
 +                                              err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
 +                                      })
 +                              }
 +                              chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
 +                      },
 +                      None => return Err(APIError::ChannelUnavailable {
 +                              err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id))
 +                      })
 +              };
 +
 +              let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
 +                      .ok_or_else(|| APIError::APIMisuseError {
 +                              err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 +                      })?;
 +
 +              let routing = match payment.forward_info.routing {
 +                      PendingHTLCRouting::Forward { onion_packet, .. } => {
 +                              PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid }
 +                      },
 +                      _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
 +              };
 +              let pending_htlc_info = PendingHTLCInfo {
 +                      outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
 +              };
 +
 +              let mut per_source_pending_forward = [(
 +                      payment.prev_short_channel_id,
 +                      payment.prev_funding_outpoint,
 +                      payment.prev_user_channel_id,
 +                      vec![(pending_htlc_info, payment.prev_htlc_id)]
 +              )];
 +              self.forward_htlcs(&mut per_source_pending_forward);
 +              Ok(())
 +      }
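
Since LDK does not enforce fee requirements here, the caller decides how much to skim. A minimal sketch of that decision, where `lsp_fee_msat` is a hypothetical fee policy, not part of this patch:

    // Returns the amount to pass as `amt_to_forward_msat`, or None if the fee
    // would exceed the inbound amount. Forwarding more than was received is
    // allowed by LDK but loses the forwarder money.
    fn amount_after_fee(inbound_amount_msat: u64, lsp_fee_msat: u64) -> Option<u64> {
        inbound_amount_msat.checked_sub(lsp_fee_msat)
    }
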
 +
 +      /// Fails the intercepted HTLC indicated by `intercept_id`. Should only be called in response to
 +      /// an [`HTLCIntercepted`] event. See [`ChannelManager::forward_intercepted_htlc`].
 +      ///
 +      /// Errors if the event was not handled in time, in which case the HTLC was automatically failed
 +      /// backwards.
 +      ///
 +      /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
 +      pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 +
 +              let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
 +                      .ok_or_else(|| APIError::APIMisuseError {
 +                              err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 +                      })?;
 +
 +              if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
 +                      let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 +                              short_channel_id: payment.prev_short_channel_id,
 +                              outpoint: payment.prev_funding_outpoint,
 +                              htlc_id: payment.prev_htlc_id,
 +                              incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
 +                              phantom_shared_secret: None,
 +                      });
 +
 +                      let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
 +                      let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id };
 +                      self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination);
 +              } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted
 +
 +              Ok(())
 +      }
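
A hedged sketch of an LSP event loop driving both new methods, assuming the `HTLCIntercepted` event carries `intercept_id` and `expected_outbound_amount_msat` fields and that `pick_outbound_channel` is a hypothetical application helper:

    match event {
        Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => {
            match pick_outbound_channel() {
                Some((channel_id, counterparty_node_id)) => {
                    channel_manager.forward_intercepted_htlc(
                        intercept_id, &channel_id, counterparty_node_id,
                        expected_outbound_amount_msat)?;
                },
                // No usable channel: fail back promptly rather than letting
                // the HTLC expire and the upstream channel be force-closed.
                None => channel_manager.fail_intercepted_htlc(intercept_id)?,
            }
        },
        _ => {},
    }
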
 +
       /// Processes HTLCs which are pending, waiting on a random forward delay.
        ///
        /// Should only really ever be called in response to a PendingHTLCsForwardable event.
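
A sketch of the intended calling pattern, assuming the event's `time_forwardable` field as documented by LDK:

    Event::PendingHTLCsForwardable { time_forwardable } => {
        // Waiting at least the randomized delay makes forwarding timing a
        // weaker deanonymization oracle.
        std::thread::sleep(time_forwardable);
        channel_manager.process_pending_htlc_forwards();
    }
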
  
                let mut new_events = Vec::new();
                let mut failed_forwards = Vec::new();
 -              let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
 +              let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
                let mut handle_errors = Vec::new();
                {
                        let mut forward_htlcs = HashMap::new();
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
  
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
 -                              let mut channel_state_lock = self.channel_state.lock().unwrap();
 -                              let channel_state = &mut *channel_state_lock;
                                if short_chan_id != 0 {
                                        macro_rules! forwarding_channel_not_found {
                                                () => {
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint,
 +                                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                                forward_info: PendingHTLCInfo {
                                                                                        routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
                                                                                        outgoing_cltv_value, incoming_amt_msat: _
                                                                                                };
  
                                                                                                failed_forwards.push((htlc_source, payment_hash,
 -                                                                                                      HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
 +                                                                                                      HTLCFailReason::reason($err_code, $err_data),
                                                                                                        reason
                                                                                                ));
                                                                                                continue;
                                                                                                match next_hop {
                                                                                                        onion_utils::Hop::Receive(hop_data) => {
                                                                                                                match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) {
 -                                                                                                                      Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
 +                                                                                                                      Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
                                                                                                                        Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                }
                                                                                                        },
                                                        continue;
                                                }
                                        };
 +                                      let mut channel_state_lock = self.channel_state.lock().unwrap();
 +                                      let channel_state = &mut *channel_state_lock;
                                        match channel_state.by_id.entry(forward_chan_id) {
                                                hash_map::Entry::Vacant(_) => {
                                                        forwarding_channel_not_found!();
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint ,
 +                                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
                                                                                forward_info: PendingHTLCInfo {
                                                                                        incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
                                                                                        routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _,
                                                                                                }
                                                                                                let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                                failed_forwards.push((htlc_source, payment_hash,
 -                                                                                                      HTLCFailReason::Reason { failure_code, data },
 +                                                                                                      HTLCFailReason::reason(failure_code, data),
                                                                                                        HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                                ));
                                                                                                continue;
                                        for forward_info in pending_forwards.drain(..) {
                                                match forward_info {
                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint,
 +                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                forward_info: PendingHTLCInfo {
                                                                        routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, ..
                                                                }
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                                                phantom_shared_secret,
                                                                                        }), payment_hash,
 -                                                                                      HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
 +                                                                                      HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
                                                                                        HTLCDestination::FailedPayment { payment_hash: $payment_hash },
                                                                                ));
                                                                        }
                                                                }
 +                                                              let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
 +                                                              let mut receiver_node_id = self.our_network_pubkey;
 +                                                              if phantom_shared_secret.is_some() {
 +                                                                      receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode)
 +                                                                              .expect("Failed to get node_id for phantom node recipient");
 +                                                              }
  
                                                                macro_rules! check_total_value {
                                                                        ($payment_data: expr, $payment_preimage: expr) => {{
                                                                                                payment_secret: $payment_data.payment_secret,
                                                                                        }
                                                                                };
 -                                                                              let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
 +                                                                              let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
 +                                                                              let (_, htlcs) = claimable_htlcs.entry(payment_hash)
                                                                                        .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if total_value == $payment_data.total_msat {
 +                                                                                      let prev_channel_id = prev_funding_outpoint.to_channel_id();
                                                                                        htlcs.push(claimable_htlc);
 -                                                                                      new_events.push(events::Event::PaymentReceived {
 +                                                                                      new_events.push(events::Event::PaymentClaimable {
 +                                                                                              receiver_node_id: Some(receiver_node_id),
                                                                                                payment_hash,
                                                                                                purpose: purpose(),
                                                                                                amount_msat: total_value,
 +                                                                                              via_channel_id: Some(prev_channel_id),
 +                                                                                              via_user_channel_id: Some(prev_user_channel_id),
                                                                                        });
                                                                                        payment_received_generated = true;
                                                                                } else {
                                                                                                check_total_value!(payment_data, payment_preimage);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
 -                                                                                              match channel_state.claimable_htlcs.entry(payment_hash) {
 +                                                                                              match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
                                                                                                                let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
                                                                                                                e.insert((purpose.clone(), vec![claimable_htlc]));
 -                                                                                                              new_events.push(events::Event::PaymentReceived {
 +                                                                                                              let prev_channel_id = prev_funding_outpoint.to_channel_id();
 +                                                                                                              new_events.push(events::Event::PaymentClaimable {
 +                                                                                                                      receiver_node_id: Some(receiver_node_id),
                                                                                                                        payment_hash,
                                                                                                                        amount_msat: outgoing_amt_msat,
                                                                                                                        purpose,
 +                                                                                                                      via_channel_id: Some(prev_channel_id),
 +                                                                                                                      via_user_channel_id: Some(prev_user_channel_id),
                                                                                                                });
                                                                                                        },
                                                                                                        hash_map::Entry::Occupied(_) => {
                }
  
                for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
 -                      self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination);
 +                      self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
                }
                self.forward_htlcs(&mut phantom_receives);
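
A hedged consumer-side sketch for the renamed `PaymentClaimable` event and its new fields; `lookup_preimage` is a hypothetical application helper:

    match event {
        Event::PaymentClaimable {
            payment_hash, amount_msat, receiver_node_id, via_channel_id, ..
        } => {
            println!("claimable: {} msat via {:?} (receiver {:?})",
                amount_msat, via_channel_id, receiver_node_id);
            match lookup_preimage(&payment_hash) {
                Some(preimage) => channel_manager.claim_funds(preimage),
                // Unknown hash or wrong amount: free the HTLCs back to their
                // origin, as `fail_htlc_backwards` documents below.
                None => channel_manager.fail_htlc_backwards(&payment_hash),
            }
        },
        _ => {},
    }
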
  
  
                                        true
                                });
 +                      }
  
 -                              channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
 -                                      if htlcs.is_empty() {
 -                                              // This should be unreachable
 -                                              debug_assert!(false);
 +                      self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
 +                              if htlcs.is_empty() {
 +                                      // This should be unreachable
 +                                      debug_assert!(false);
 +                                      return false;
 +                              }
 +                              if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
 +                                      // Check if we've received all the parts we need for an MPP (the values of the parts sum to total_msat).
 +                                      // In this case we're not going to handle any timeouts of the parts here.
 +                                      if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
 +                                              return true;
 +                                      } else if htlcs.into_iter().any(|htlc| {
 +                                              htlc.timer_ticks += 1;
 +                                              return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
 +                                      }) {
 +                                              timed_out_mpp_htlcs.extend(htlcs.drain(..).map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
                                                return false;
                                        }
 -                                      if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
 -                                              // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
 -                                              // In this case we're not going to handle any timeouts of the parts here.
 -                                              if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
 -                                                      return true;
 -                                              } else if htlcs.into_iter().any(|htlc| {
 -                                                      htlc.timer_ticks += 1;
 -                                                      return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
 -                                              }) {
 -                                                      timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
 -                                                      return false;
 -                                              }
 -                                      }
 -                                      true
 -                              });
 -                      }
 +                              }
 +                              true
 +                      });
  
                        for htlc_source in timed_out_mpp_htlcs.drain(..) {
 +                              let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
 +                              let reason = HTLCFailReason::from_failure_code(23);
                                let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
 -                              self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver );
 +                              self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver);
                        }
  
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
        }
  
        /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
 -      /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
 +      /// after a PaymentClaimable event, failing the HTLC back to its origin and freeing resources
        /// along the path (including in our own channel on which we received it).
        ///
        /// Note that in some cases around unclean shutdown, it is possible the payment may have
        /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
 -      /// second copy of) the [`events::Event::PaymentReceived`] event. Alternatively, the payment
 +      /// second copy of) the [`events::Event::PaymentClaimable`] event. Alternatively, the payment
        /// may have already been failed automatically by LDK if it was nearing its expiration time.
        ///
        /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
        pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
 -              let removed_source = {
 -                      let mut channel_state = self.channel_state.lock().unwrap();
 -                      channel_state.claimable_htlcs.remove(payment_hash)
 -              };
 +              let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
                if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
                                                self.best_block.read().unwrap().height()));
 -                              self.fail_htlc_backwards_internal(
 -                                              HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
 -                                              HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
 -                                              HTLCDestination::FailedPayment { payment_hash: *payment_hash });
 +                              let source = HTLCSource::PreviousHopData(htlc.prev_hop);
 +                              let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
 +                              let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
 +                              self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                        }
                }
        }
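
The `htlc_msat_height_data` assembled above is the BOLT 4 failure data for `incorrect_or_unknown_payment_details` (0x4000|15): a big-endian u64 HTLC amount followed by a big-endian u32 block height. An equivalent sketch (annotation, not LDK source):

    fn incorrect_payment_details_data(htlc_msat: u64, best_block_height: u32) -> Vec<u8> {
        let mut data = htlc_msat.to_be_bytes().to_vec();          // [u64: htlc_msat]
        data.extend_from_slice(&best_block_height.to_be_bytes()); // [u32: height]
        data
    }
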
                &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
                counterparty_node_id: &PublicKey
        ) {
 -              for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
 -                      let (failure_code, onion_failure_data) =
 -                              match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
 -                                      hash_map::Entry::Occupied(chan_entry) => {
 -                                              self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
 -                                      },
 -                                      hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
 -                              };
 +              let (failure_code, onion_failure_data) =
 +                      match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
 +                              hash_map::Entry::Occupied(chan_entry) => {
 +                                      self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
 +                              },
 +                              hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
 +                      };
  
 +              for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
 +                      let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
 -                      self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
 +                      self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
                }
        }
  
        /// Fails an HTLC backwards to the sender of it to us.
        /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 -      fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason,destination: HTLCDestination) {
 +      fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
                #[cfg(debug_assertions)]
                {
                        // Ensure that the `channel_state` lock is not held when calling this function.
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
                match source {
 -                      HTLCSource::OutboundRoute { ref path, session_priv, payment_id, ref payment_params, .. } => {
 +                      HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
                                let mut session_priv_bytes = [0; 32];
                                session_priv_bytes.copy_from_slice(&session_priv[..]);
                                let mut outbounds = self.pending_outbound_payments.lock().unwrap();
                                let mut all_paths_failed = false;
                                let mut full_failure_ev = None;
 -                              if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
 +                              if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) {
                                        if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
                                                log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                                return;
                                                all_paths_failed = true;
                                                if payment.get().abandoned() {
                                                        full_failure_ev = Some(events::Event::PaymentFailed {
 -                                                              payment_id,
 +                                                              payment_id: *payment_id,
                                                                payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
                                                        });
                                                        payment.remove();
                                                if self.payment_is_probe(payment_hash, &payment_id) {
                                                        if !payment_retryable {
                                                                events::Event::ProbeSuccessful {
 -                                                                      payment_id,
 +                                                                      payment_id: *payment_id,
                                                                        payment_hash: payment_hash.clone(),
                                                                        path: path.clone(),
                                                                }
                                                        } else {
                                                                events::Event::ProbeFailed {
 -                                                                      payment_id,
 +                                                                      payment_id: *payment_id,
                                                                        payment_hash: payment_hash.clone(),
                                                                        path: path.clone(),
                                                                        short_channel_id,
                                                                retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
                                                        }
                                                        events::Event::PaymentPathFailed {
 -                                                              payment_id: Some(payment_id),
 +                                                              payment_id: Some(*payment_id),
                                                                payment_hash: payment_hash.clone(),
                                                                payment_failed_permanently: !payment_retryable,
                                                                network_update,
  
                                                if self.payment_is_probe(payment_hash, &payment_id) {
                                                        events::Event::ProbeFailed {
 -                                                              payment_id,
 +                                                              payment_id: *payment_id,
                                                                payment_hash: payment_hash.clone(),
                                                                path: path.clone(),
                                                                short_channel_id: Some(scid),
                                                        }
                                                } else {
                                                        events::Event::PaymentPathFailed {
 -                                                              payment_id: Some(payment_id),
 +                                                              payment_id: Some(*payment_id),
                                                                payment_hash: payment_hash.clone(),
                                                                payment_failed_permanently: false,
                                                                network_update: None,
                                pending_events.push(path_failure);
                                if let Some(ev) = full_failure_ev { pending_events.push(ev); }
                        },
 -                      HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
 +                      HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
                                let err_packet = match onion_error {
 -                                      HTLCFailReason::Reason { failure_code, data } => {
 +                                      HTLCFailReason::Reason { ref failure_code, ref data } => {
                                                log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
                                                if let Some(phantom_ss) = phantom_shared_secret {
 -                                                      let phantom_packet = onion_utils::build_failure_packet(&phantom_ss, failure_code, &data[..]).encode();
 -                                                      let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(&phantom_ss, &phantom_packet);
 -                                                      onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
 +                                                      let phantom_packet = onion_utils::build_failure_packet(phantom_ss, *failure_code, &data[..]).encode();
 +                                                      let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(phantom_ss, &phantom_packet);
 +                                                      onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
                                                } else {
 -                                                      let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
 -                                                      onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
 +                                                      let packet = onion_utils::build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]).encode();
 +                                                      onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &packet)
                                                }
                                        },
                                        HTLCFailReason::LightningError { err } => {
                                                log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
 -                                              onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
 +                                              onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &err.data)
                                        }
                                };
  
                                if forward_htlcs.is_empty() {
                                        forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
                                }
 -                              match forward_htlcs.entry(short_channel_id) {
 +                              match forward_htlcs.entry(*short_channel_id) {
                                        hash_map::Entry::Occupied(mut entry) => {
 -                                              entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
 +                                              entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
                                        },
                                        hash_map::Entry::Vacant(entry) => {
 -                                              entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
 +                                              entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }));
                                        }
                                }
                                mem::drop(forward_htlcs);
                                }
                                pending_events.push(events::Event::HTLCHandlingFailed {
                                        prev_channel_id: outpoint.to_channel_id(),
 -                                      failed_next_destination: destination
 +                                      failed_next_destination: destination,
                                });
                        },
                }
        }
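
The phantom-node branch above encrypts the failure packet twice: once with the phantom shared secret, then again with the real incoming hop's secret, so the error peels correctly layer by layer on its way back. A toy sketch of that layering, with plain XOR standing in for the BOLT 4 "ammag" ChaCha20 stream (`xor_layer`, `wrap_failure`, and the fixed-size secrets are illustrative, not LDK's `onion_utils` API):

    // Toy model of layered failure-packet encryption; XOR with the raw
    // secret stands in for the per-hop stream cipher.
    fn xor_layer(secret: &[u8; 32], packet: &mut Vec<u8>) {
        for (i, byte) in packet.iter_mut().enumerate() {
            *byte ^= secret[i % 32];
        }
    }

    fn wrap_failure(
        incoming_secret: &[u8; 32],
        phantom_secret: Option<&[u8; 32]>,
        mut packet: Vec<u8>,
    ) -> Vec<u8> {
        // For phantom receives, first add the phantom node's layer...
        if let Some(phantom_ss) = phantom_secret {
            xor_layer(phantom_ss, &mut packet);
        }
        // ...then always add the real incoming hop's layer, matching the two
        // encrypt_failure_packet calls in the phantom branch above.
        xor_layer(incoming_secret, &mut packet);
        packet
    }

    fn main() {
        let wrapped = wrap_failure(&[7u8; 32], Some(&[9u8; 32]), vec![0u8; 16]);
        assert_eq!(wrapped.len(), 16);
    }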
  
 -      /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
 +      /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
        /// [`MessageSendEvent`]s needed to claim the payment.
        ///
        /// Note that calling this method does *not* guarantee that the payment has been claimed. You
        /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
        ///
        /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
 -      /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
 +      /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
        /// event matches your expectation. If you fail to do so and call this method, you may provide
        /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
        ///
 -      /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
 +      /// [`Event::PaymentClaimable`]: crate::util::events::Event::PaymentClaimable
        /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
        /// [`process_pending_events`]: EventsProvider::process_pending_events
        /// [`create_inbound_payment`]: Self::create_inbound_payment
  
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
 -              let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
 +              let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
                if let Some((payment_purpose, mut sources)) = removed_source {
                        assert!(!sources.is_empty());
  
                        let mut claimed_any_htlcs = false;
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 +                      let mut receiver_node_id = Some(self.our_network_pubkey);
                        for htlc in sources.iter() {
                                let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
                                        Some((_cp_id, chan_id)) => chan_id.clone(),
                                                break;
                                        }
                                }
 +                              let phantom_shared_secret = htlc.prev_hop.phantom_shared_secret;
 +                              if phantom_shared_secret.is_some() {
 +                                      let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
 +                                              .expect("Failed to get node_id for phantom node recipient");
 +                                      receiver_node_id = Some(phantom_pubkey);
 +                              }
  
                                claimable_amt_msat += htlc.value;
                        }
                                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
                                                self.best_block.read().unwrap().height()));
 -                                      self.fail_htlc_backwards_internal(
 -                                              HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
 -                                              HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
 -                                              HTLCDestination::FailedPayment { payment_hash } );
 +                                      let source = HTLCSource::PreviousHopData(htlc.prev_hop);
 +                                      let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
 +                                      let receiver = HTLCDestination::FailedPayment { payment_hash };
 +                                      self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                                }
                        }
  
                        if claimed_any_htlcs {
                                self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
 +                                      receiver_node_id,
                                        payment_hash,
                                        purpose: payment_purpose,
                                        amount_msat: claimable_amt_msat,
  
        fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
 -              let channel_state = &mut **channel_state_lock;
 -              let chan_id = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
 -                      Some((_cp_id, chan_id)) => chan_id.clone(),
 -                      None => {
 -                              return ClaimFundsFromHop::PrevHopForceClosed
 -                      }
 -              };
  
 +              let chan_id = prev_hop.outpoint.to_channel_id();
 +              let channel_state = &mut **channel_state_lock;
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok(msgs_monitor_option) => {
                self.our_network_pubkey.clone()
        }
  
 +      /// Handles a channel reentering a functional state, either due to reconnect or a monitor
 +      /// update completion.
 +      fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
 +              channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
 +              commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
 +              pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
 +              channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
 +      -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
 +              let mut htlc_forwards = None;
 +
 +              let counterparty_node_id = channel.get_counterparty_node_id();
 +              if !pending_forwards.is_empty() {
 +                      htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
 +                              channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
 +              }
 +
 +              if let Some(msg) = channel_ready {
 +                      send_channel_ready!(self, pending_msg_events, channel, msg);
 +              }
 +              if let Some(msg) = announcement_sigs {
 +                      pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 +                              node_id: counterparty_node_id,
 +                              msg,
 +                      });
 +              }
 +
 +              emit_channel_ready_event!(self, channel);
 +
 +              macro_rules! handle_cs { () => {
 +                      if let Some(update) = commitment_update {
 +                              pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 +                                      node_id: counterparty_node_id,
 +                                      updates: update,
 +                              });
 +                      }
 +              } }
 +              macro_rules! handle_raa { () => {
 +                      if let Some(revoke_and_ack) = raa {
 +                              pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
 +                                      node_id: counterparty_node_id,
 +                                      msg: revoke_and_ack,
 +                              });
 +                      }
 +              } }
 +              match order {
 +                      RAACommitmentOrder::CommitmentFirst => {
 +                              handle_cs!();
 +                              handle_raa!();
 +                      },
 +                      RAACommitmentOrder::RevokeAndACKFirst => {
 +                              handle_raa!();
 +                              handle_cs!();
 +                      },
 +              }
 +
 +              if let Some(tx) = funding_broadcastable {
 +                      log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
 +                      self.tx_broadcaster.broadcast_transaction(&tx);
 +              }
 +
 +              htlc_forwards
 +      }
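
The `handle_cs!`/`handle_raa!` macro pair above exists because, on resumption, the commitment update and revoke_and_ack must be replayed in a state-dependent order tracked by `RAACommitmentOrder`. A minimal sketch of the same pattern (toy `Order`/`Msg` types, not LDK's machinery):

    #[derive(Debug, PartialEq)]
    enum Msg { Commitment, RevokeAndAck }

    enum Order { CommitmentFirst, RevokeAndAckFirst }

    // Queue two optional messages in the order the channel state requires.
    fn resume(order: Order, commitment: Option<Msg>, raa: Option<Msg>) -> Vec<Msg> {
        let mut queue = Vec::new();
        match order {
            Order::CommitmentFirst => {
                if let Some(m) = commitment { queue.push(m); }
                if let Some(m) = raa { queue.push(m); }
            },
            Order::RevokeAndAckFirst => {
                if let Some(m) = raa { queue.push(m); }
                if let Some(m) = commitment { queue.push(m); }
            },
        }
        queue
    }

    fn main() {
        let msgs = resume(Order::RevokeAndAckFirst, Some(Msg::Commitment), Some(Msg::RevokeAndAck));
        assert_eq!(msgs, vec![Msg::RevokeAndAck, Msg::Commitment]);
        assert_eq!(resume(Order::CommitmentFirst, None, Some(Msg::RevokeAndAck)), vec![Msg::RevokeAndAck]);
    }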
 +
        fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
 -              let chan_restoration_res;
 +              let htlc_forwards;
                let (mut pending_failures, finalized_claims, counterparty_node_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                                        })
                                } else { None }
                        } else { None };
 -                      chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
 +                      htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
  
                        (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
                };
 -              post_handle_chan_restoration!(self, chan_restoration_res);
 +              if let Some(forwards) = htlc_forwards {
 +                      self.forward_htlcs(&mut [forwards][..]);
 +              }
                self.finalize_claims(finalized_claims);
                for failure in pending_failures.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
 -                      self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
 +                      self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
                }
        }
  
                };
                for htlc_source in dropped_htlcs.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
 -                      self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
 +                      let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 +                      self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
  
                let _ = handle_error!(self, result, *counterparty_node_id);
                                        let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
                                        try_chan_entry!(self, Err(chan_err), chan);
                                }
 -                              try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), chan);
 +                              try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::from_failure_code(msg.failure_code)), chan);
                                Ok(())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
        }
  
        #[inline]
 -      fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)]) {
 -              for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
 +      fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
 +              for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
                        let mut forward_event = None;
 +                      let mut new_intercept_events = Vec::new();
 +                      let mut failed_intercept_forwards = Vec::new();
                        if !pending_forwards.is_empty() {
 -                              let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
 -                              if forward_htlcs.is_empty() {
 -                                      forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
 -                              }
                                for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
 -                                      match forward_htlcs.entry(match forward_info.routing {
 -                                                      PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
 -                                                      PendingHTLCRouting::Receive { .. } => 0,
 -                                                      PendingHTLCRouting::ReceiveKeysend { .. } => 0,
 -                                      }) {
 +                                      let scid = match forward_info.routing {
 +                                              PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
 +                                              PendingHTLCRouting::Receive { .. } => 0,
 +                                              PendingHTLCRouting::ReceiveKeysend { .. } => 0,
 +                                      };
 +                                      // Pull this now to avoid introducing a lock-order dependency with `forward_htlcs`.
 +                                      let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
 +
 +                                      let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
 +                                      let forward_htlcs_empty = forward_htlcs.is_empty();
 +                                      match forward_htlcs.entry(scid) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                              prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, forward_info }));
 +                                                              prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
 -                                                      entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                              prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, forward_info })));
 +                                                      if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
 +                                                         fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash)
 +                                                      {
 +                                                              let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
 +                                                              let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
 +                                                              match pending_intercepts.entry(intercept_id) {
 +                                                                      hash_map::Entry::Vacant(entry) => {
 +                                                                              new_intercept_events.push(events::Event::HTLCIntercepted {
 +                                                                                      requested_next_hop_scid: scid,
 +                                                                                      payment_hash: forward_info.payment_hash,
 +                                                                                      inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
 +                                                                                      expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
 +                                                                                      intercept_id
 +                                                                              });
 +                                                                              entry.insert(PendingAddHTLCInfo {
 +                                                                                      prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
 +                                                                      },
 +                                                                      hash_map::Entry::Occupied(_) => {
 +                                                                              log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
 +                                                                              let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 +                                                                                      short_channel_id: prev_short_channel_id,
 +                                                                                      outpoint: prev_funding_outpoint,
 +                                                                                      htlc_id: prev_htlc_id,
 +                                                                                      incoming_packet_shared_secret: forward_info.incoming_shared_secret,
 +                                                                                      phantom_shared_secret: None,
 +                                                                              });
 +
 +                                                                              failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
 +                                                                                              HTLCFailReason::from_failure_code(0x4000 | 10),
 +                                                                                              HTLCDestination::InvalidForward { requested_forward_scid: scid },
 +                                                                              ));
 +                                                                      }
 +                                                              }
 +                                                      } else {
 +                                                              // We don't want to generate a PendingHTLCsForwardable event if only intercepted
 +                                                              // payments are being processed.
 +                                                              if forward_htlcs_empty {
 +                                                                      forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
 +                                                              }
 +                                                              entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 +                                                                      prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
 +                                                      }
                                                }
                                        }
                                }
                        }
 +
 +                      for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
 +                              self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
 +                      }
 +
 +                      if !new_intercept_events.is_empty() {
 +                              let mut events = self.pending_events.lock().unwrap();
 +                              events.append(&mut new_intercept_events);
 +                      }
 +
                        match forward_event {
                                Some(time) => {
                                        let mut pending_events = self.pending_events.lock().unwrap();
                                                        raa_updates.finalized_claimed_htlcs,
                                                        chan.get().get_short_channel_id()
                                                                .unwrap_or(chan.get().outbound_scid_alias()),
 -                                                      chan.get().get_funding_txo().unwrap()))
 +                                                      chan.get().get_funding_txo().unwrap(),
 +                                                      chan.get().get_user_id()))
                                },
                                hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
                match res {
                        Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
 -                              short_channel_id, channel_outpoint)) =>
 +                              short_channel_id, channel_outpoint, user_channel_id)) =>
                        {
                                for failure in pending_failures.drain(..) {
                                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
 -                                      self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
 +                                      self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
                                }
 -                              self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
 +                              self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]);
                                self.finalize_claims(finalized_claim_htlcs);
                                Ok(())
                        },
        }
  
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 -              let chan_restoration_res;
 -              let (htlcs_failed_forward, need_lnd_workaround) = {
 +              let htlc_forwards;
 +              let need_lnd_workaround = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
  
                                                }
                                        }
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
 -                                      chan_restoration_res = handle_chan_restoration_locked!(
 -                                              self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
 -                                              responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
 +                                      htlc_forwards = self.handle_channel_resumption(
 +                                              &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
 +                                              Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
                                        if let Some(upd) = channel_update {
                                                channel_state.pending_msg_events.push(upd);
                                        }
 -                                      (responses.holding_cell_failed_htlcs, need_lnd_workaround)
 +                                      need_lnd_workaround
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
 -              post_handle_chan_restoration!(self, chan_restoration_res);
 -              self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
 +
 +              if let Some(forwards) = htlc_forwards {
 +                      self.forward_htlcs(&mut [forwards][..]);
 +              }
  
                if let Some(channel_ready_msg) = need_lnd_workaround {
                        self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                                        let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
 -                                                      self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
 +                                                      let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 +                                                      self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
                                        MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
        /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
        /// [`PaymentHash`] and [`PaymentPreimage`] for you.
        ///
 -      /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentReceived`], which
 -      /// will have the [`PaymentReceived::payment_preimage`] field filled in. That should then be
 +      /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event,
 +      /// which will have the [`PaymentClaimable::payment_preimage`] field filled in. That should then be
        /// passed directly to [`claim_funds`].
        ///
        /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
        /// Errors if `min_value_msat` is greater than total bitcoin supply.
        ///
        /// [`claim_funds`]: Self::claim_funds
 -      /// [`PaymentReceived`]: events::Event::PaymentReceived
 -      /// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage
 +      /// [`PaymentClaimable`]: events::Event::PaymentClaimable
 +      /// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
        pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> {
                inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
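
As the docs above stress, if no `amount_msat` was set at invoice creation the claimer must check the claimable amount itself before releasing the preimage. A self-contained sketch of that guard (`should_claim` and its parameters are illustrative, not part of the API):

    // Claiming less than the expected amount would hand the sender
    // "proof-of-payment" for an underpayment, so gate claim_funds on this.
    fn should_claim(claimable_msat: u64, expected_msat: u64) -> bool {
        claimable_msat >= expected_msat
    }

    fn main() {
        assert!(should_claim(10_000, 10_000));
        assert!(!should_claim(9_999, 10_000));
    }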
        /// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
        /// stored external to LDK.
        ///
 -      /// A [`PaymentReceived`] event will only be generated if the [`PaymentSecret`] matches a
 +      /// A [`PaymentClaimable`] event will only be generated if the [`PaymentSecret`] matches a
        /// payment secret fetched via this method or [`create_inbound_payment`], and which is at least
        /// the `min_value_msat` provided here, if one is provided.
        ///
        ///
        /// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
        /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
 -      /// before a [`PaymentReceived`] event will be generated, ensuring that we do not provide the
 +      /// before a [`PaymentClaimable`] event will be generated, ensuring that we do not provide the
        /// sender "proof-of-payment" unless they have paid the required amount.
        ///
        /// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
        ///
        /// Note that we use block header time to time-out pending inbound payments (with some margin
        /// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will
 -      /// accept a payment and generate a [`PaymentReceived`] event for some time after the expiry.
 +      /// accept a payment and generate a [`PaymentClaimable`] event for some time after the expiry.
        /// If you need exact expiry semantics, you should enforce them upon receipt of
 -      /// [`PaymentReceived`].
 +      /// [`PaymentClaimable`].
        ///
        /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
        /// set to at least [`MIN_FINAL_CLTV_EXPIRY`].
        /// Errors if `min_value_msat` is greater than total bitcoin supply.
        ///
        /// [`create_inbound_payment`]: Self::create_inbound_payment
 -      /// [`PaymentReceived`]: events::Event::PaymentReceived
 +      /// [`PaymentClaimable`]: events::Event::PaymentClaimable
        pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, ()> {
                inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
        }
                }
        }
  
 +      /// Gets a fake short channel id for use in receiving intercepted payments. These fake scids are
 +      /// used when constructing the route hints for HTLCs intended to be intercepted. See
 +      /// [`ChannelManager::forward_intercepted_htlc`].
 +      ///
 +      /// Note that this method is not guaranteed to return unique values across calls; you may need to
 +      /// call it a few times to get a unique scid.
 +      pub fn get_intercept_scid(&self) -> u64 {
 +              let best_block_height = self.best_block.read().unwrap().height();
 +              let short_to_chan_info = self.short_to_chan_info.read().unwrap();
 +              loop {
 +                      let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
 +                      // Ensure the generated scid doesn't conflict with a real channel.
 +                      if short_to_chan_info.contains_key(&scid_candidate) { continue }
 +                      return scid_candidate
 +              }
 +      }
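
The loop above is rejection sampling: draw a random candidate scid and retry on collision with a real channel. The same shape in isolation (the toy counter RNG and `HashSet` stand in for the Intercept fake-scid namespace and `short_to_chan_info`):

    use std::collections::HashSet;

    // Keep drawing candidates until one doesn't collide with a real scid.
    fn pick_unused_scid(used: &HashSet<u64>, mut next_candidate: impl FnMut() -> u64) -> u64 {
        loop {
            let candidate = next_candidate();
            if used.contains(&candidate) { continue }
            return candidate
        }
    }

    fn main() {
        let used: HashSet<u64> = HashSet::from([1, 2, 3]);
        let mut n = 0u64;
        assert_eq!(pick_unused_scid(&used, || { n += 1; n }), 4);
    }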
 +
        /// Gets inflight HTLC information by processing pending outbound payments that are in
        /// our channels. May be used during pathfinding to account for in-use channel liquidity.
        pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
                let mut inflight_htlcs = InFlightHtlcs::new();
  
                for chan in self.channel_state.lock().unwrap().by_id.values() {
-                       for htlc_source in chan.inflight_htlc_sources() {
+                       for (htlc_source, _) in chan.inflight_htlc_sources() {
                                if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
                                        inflight_htlcs.process_path(path, self.get_our_node_id());
                                }
                events.into_inner()
        }
  
+       #[cfg(test)]
+       pub fn pop_pending_event(&self) -> Option<events::Event> {
+               let mut events = self.pending_events.lock().unwrap();
+               if events.is_empty() { None } else { Some(events.remove(0)) }
+       }
        #[cfg(test)]
        pub fn has_pending_payments(&self) -> bool {
                !self.pending_outbound_payments.lock().unwrap().is_empty()
@@@ -6198,8 -6023,9 +6204,8 @@@ wher
                                if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
 -                                              timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
 -                                                      failure_code, data,
 -                                              }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
 +                                              timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
 +                                                      HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
                                        }
                                        if let Some(channel_ready) = channel_ready_opt {
                                                send_channel_ready!(self, pending_msg_events, channel, channel_ready);
                                }
                                true
                        });
 +              }
  
 -                      if let Some(height) = height_opt {
 -                              channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
 -                                      htlcs.retain(|htlc| {
 -                                              // If height is approaching the number of blocks we think it takes us to get
 -                                              // our commitment transaction confirmed before the HTLC expires, plus the
 -                                              // number of blocks we generally consider it to take to do a commitment update,
 -                                              // just give up on it and fail the HTLC.
 -                                              if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
 -                                                      let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
 -                                                      htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
 -
 -                                                      timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
 -                                                              failure_code: 0x4000 | 15,
 -                                                              data: htlc_msat_height_data
 -                                                      }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
 -                                                      false
 -                                              } else { true }
 -                                      });
 -                                      !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
 +              if let Some(height) = height_opt {
 +                      self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
 +                              htlcs.retain(|htlc| {
 +                                      // If height is approaching the number of blocks we think it takes us to get
 +                                      // our commitment transaction confirmed before the HTLC expires, plus the
 +                                      // number of blocks we generally consider it to take to do a commitment update,
 +                                      // just give up on it and fail the HTLC.
 +                                      if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
 +                                              let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
 +                                              htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
 +
 +                                              timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
 +                                                      HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
 +                                                      HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
 +                                              false
 +                                      } else { true }
                                });
 -                      }
 +                              !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
 +                      });
 +
 +                      let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
 +                      intercepted_htlcs.retain(|_, htlc| {
 +                              if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
 +                                      let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 +                                              short_channel_id: htlc.prev_short_channel_id,
 +                                              htlc_id: htlc.prev_htlc_id,
 +                                              incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
 +                                              phantom_shared_secret: None,
 +                                              outpoint: htlc.prev_funding_outpoint,
 +                                      });
 +
 +                                      let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
 +                                              PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
 +                                              _ => unreachable!(),
 +                                      };
 +                                      timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
 +                                                      HTLCFailReason::from_failure_code(0x2000 | 2),
 +                                                      HTLCDestination::InvalidForward { requested_forward_scid }));
 +                                      log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
 +                                      false
 +                              } else { true }
 +                      });
                }
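
Both retain loops above share one predicate: give up and fail the HTLC backwards once the chain tip is within `HTLC_FAIL_BACK_BUFFER` blocks of its CLTV expiry, leaving room to confirm a commitment transaction and do a commitment update first. Stated on its own (the buffer value of 6 is a toy constant, not LDK's):

    const FAIL_BACK_BUFFER: u32 = 6; // toy value, not LDK's HTLC_FAIL_BACK_BUFFER

    // Mirrors `height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER` above;
    // saturating_sub just guards the toy against underflow.
    fn should_fail_back(height: u32, cltv_expiry: u32) -> bool {
        height >= cltv_expiry.saturating_sub(FAIL_BACK_BUFFER)
    }

    fn main() {
        assert!(!should_fail_back(100, 120));
        assert!(should_fail_back(114, 120));
    }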
  
                self.handle_init_event_channel_failures(failed_channels);
  
                for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
 -                      self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
 +                      self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
                }
        }
  
        }
  }
  
 -impl<M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
 +impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        ChannelMessageHandler for ChannelManager<M, T, K, F, L>
        where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
          T::Target: BroadcasterInterface,
@@@ -6702,7 -6506,6 +6708,7 @@@ impl Writeable for ChannelDetails 
                        (6, self.funding_txo, option),
                        (7, self.config, option),
                        (8, self.short_channel_id, option),
 +                      (9, self.confirmations, option),
                        (10, self.channel_value_satoshis, required),
                        (12, self.unspendable_punishment_reserve, option),
                        (14, user_channel_id_low, required),
@@@ -6737,7 -6540,6 +6743,7 @@@ impl Readable for ChannelDetails 
                        (6, funding_txo, option),
                        (7, config, option),
                        (8, short_channel_id, option),
 +                      (9, confirmations, option),
                        (10, channel_value_satoshis, required),
                        (12, unspendable_punishment_reserve, option),
                        (14, user_channel_id_low, required),
                        next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
                        inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
                        confirmations_required,
 +                      confirmations,
                        force_close_spend_delay,
                        is_outbound: is_outbound.0.unwrap(),
                        is_channel_ready: is_channel_ready.0.unwrap(),
@@@ -7045,7 -6846,6 +7051,7 @@@ impl_writeable_tlv_based_enum!(HTLCFail
  
  impl_writeable_tlv_based!(PendingAddHTLCInfo, {
        (0, forward_info, required),
 +      (1, prev_user_channel_id, (default_value, 0)),
        (2, prev_short_channel_id, required),
        (4, prev_htlc_id, required),
        (6, prev_funding_outpoint, required),
@@@ -7140,13 -6940,10 +7146,13 @@@ impl<M: Deref, T: Deref, K: Deref, F: D
                        }
                }
  
 -              let channel_state = self.channel_state.lock().unwrap();
 +              let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
 +              let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
 +              let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
 +
                let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
 -              (channel_state.claimable_htlcs.len() as u64).write(writer)?;
 -              for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
 +              (claimable_htlcs.len() as u64).write(writer)?;
 +              for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
                        payment_hash.write(writer)?;
                        (previous_hops.len() as u64).write(writer)?;
                        for htlc in previous_hops.iter() {
                        peer_state.latest_features.write(writer)?;
                }
  
 -              let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
 -              let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
                let events = self.pending_events.lock().unwrap();
                (events.len() as u64).write(writer)?;
                for event in events.iter() {
                                _ => {},
                        }
                }
 +
 +              let mut pending_intercepted_htlcs = None;
 +              let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
 +              if !our_pending_intercepts.is_empty() {
 +                      pending_intercepted_htlcs = Some(our_pending_intercepts);
 +              }
                write_tlv_fields!(writer, {
                        (1, pending_outbound_payments_no_retry, required),
 +                      (2, pending_intercepted_htlcs, option),
                        (3, pending_outbound_payments, required),
                        (5, self.our_network_pubkey, required),
                        (7, self.fake_scid_rand_bytes, required),
@@@ -7420,6 -7212,25 +7426,25 @@@ impl<'a, M: Deref, T: Deref, K: Deref, 
                                                user_channel_id: channel.get_user_id(),
                                                reason: ClosureReason::OutdatedChannelManager
                                        });
+                                       for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
+                                               let mut found_htlc = false;
+                                               for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
+                                                       if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
+                                               }
+                                               if !found_htlc {
+                                                       // If we have some HTLCs in the channel which are not present in the newer
+                                                       // ChannelMonitor, they have been removed and should be failed back to
+                                                       // ensure we don't forget them entirely. Note that if the missing HTLC(s)
+                                                       // were actually claimed, we'd have generated the previous-hop claim
+                                                       // ChannelMonitor updates and ensured they were persisted prior to persisting
+                                                       // the ChannelMonitor update for the forward leg, so attempting to fail the
+                                                       // backwards leg of the HTLC will simply be rejected.
+                                                       log_info!(args.logger,
+                                                               "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
+                                                               log_bytes!(payment_hash.0), log_bytes!(channel.channel_id()));
+                                                       failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+                                               }
+                                       }
                                } else {
                                        log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                None => continue,
                        }
                }
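
The missing-HTLC scan above (failing back HTLCs present in the stale ChannelManager but absent from the newer ChannelMonitor) computes a set difference. The same shape with a `HashSet` (toy `u64` IDs standing in for `HTLCSource`):

    use std::collections::HashSet;

    // HTLCs known to the stale manager but absent from the monitor are the
    // ones that must be failed back on reload.
    fn htlcs_to_fail(channel_htlcs: &[u64], monitor_htlcs: &[u64]) -> Vec<u64> {
        let monitor_set: HashSet<u64> = monitor_htlcs.iter().copied().collect();
        channel_htlcs.iter().copied()
            .filter(|src| !monitor_set.contains(src))
            .collect()
    }

    fn main() {
        assert_eq!(htlcs_to_fail(&[1, 2, 3], &[2, 3]), vec![1]);
    }

The real code uses a simple quadratic scan instead, which is reasonable given the small number of in-flight HTLCs per channel.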
-               if forward_htlcs_count > 0 {
-                       // If we have pending HTLCs to forward, assume we either dropped a
-                       // `PendingHTLCsForwardable` or the user received it but never processed it as they
-                       // shut down before the timer hit. Either way, set the time_forwardable to a small
-                       // constant as enough time has likely passed that we should simply handle the forwards
-                       // now, or at least after the user gets a chance to reconnect to our peers.
-                       pending_events_read.push(events::Event::PendingHTLCsForwardable {
-                               time_forwardable: Duration::from_secs(2),
-                       });
-               }
  
                let background_event_count: u64 = Readable::read(reader)?;
                let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
                // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
                let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
                let mut pending_outbound_payments = None;
 +              let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
                let mut probing_cookie_secret: Option<[u8; 32]> = None;
                let mut claimable_htlc_purposes = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
 +                      (2, pending_intercepted_htlcs, option),
                        (3, pending_outbound_payments, option),
                        (5, received_network_pubkey, option),
                        (7, fake_scid_rand_bytes, option),
                                                        }
                                                }
                                        }
+                                       for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
+                                               if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+                                                       // The ChannelMonitor is now responsible for this HTLC's
+                                                       // failure/success and will let us know what its outcome is. If we
+                                                       // still have an entry for this HTLC in `forward_htlcs`, the
+                                                       // ChannelManager apparently wasn't persisted after the monitor
+                                                       // was when the payment was forwarded.
+                                                       forward_htlcs.retain(|_, forwards| {
+                                                               forwards.retain(|forward| {
+                                                                       if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+                                                                               if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
+                                                                                       htlc_info.prev_htlc_id == prev_hop_data.htlc_id
+                                                                               {
+                                                                                       log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+                                                                                               log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+                                                                                       false
+                                                                               } else { true }
+                                                                       } else { true }
+                                                               });
+                                                               !forwards.is_empty()
+                                                       })
+                                               }
+                                       }
                                }
                        }
                }
  
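The retain-based dedup above is the core of the reload fix: any queued forward whose previous hop matches an HTLC the ChannelMonitor is already resolving gets dropped, and now-empty queues are pruned. A minimal standalone sketch of the same matching logic, using simplified stand-in types (the real HTLCForwardInfo/PreviousHopData carry many more fields):

    use std::collections::HashMap;

    // Simplified stand-ins for the fields the dedup actually compares.
    struct PrevHopData { short_channel_id: u64, htlc_id: u64 }
    struct AddHtlcInfo { prev_short_channel_id: u64, prev_htlc_id: u64 }

    // Drop any queued forward whose previous hop matches an HTLC the monitor
    // now owns, then prune queues that became empty.
    fn prune_monitor_owned(
        forward_htlcs: &mut HashMap<u64, Vec<AddHtlcInfo>>, prev: &PrevHopData,
    ) {
        forward_htlcs.retain(|_, forwards| {
            forwards.retain(|f| {
                f.prev_short_channel_id != prev.short_channel_id
                    || f.prev_htlc_id != prev.htlc_id
            });
            !forwards.is_empty()
        });
    }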
+               if !forward_htlcs.is_empty() {
+                       // If we have pending HTLCs to forward, assume we either dropped a
+                       // `PendingHTLCsForwardable` or the user received it but never processed it as they
+                       // shut down before the timer hit. Either way, set the time_forwardable to a small
+                       // constant as enough time has likely passed that we should simply handle the forwards
+                       // now, or at least after the user gets a chance to reconnect to our peers.
+                       pending_events_read.push(events::Event::PendingHTLCsForwardable {
+                               time_forwardable: Duration::from_secs(2),
+                       });
+               }
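With the check now keyed on the rebuilt forward_htlcs map, a node reloaded mid-forward reliably sees a PendingHTLCsForwardable event shortly after startup. A hedged sketch of the consumer-side handling (Event::PendingHTLCsForwardable and ChannelManager::process_pending_htlc_forwards are the real APIs; the closure-based plumbing here is illustrative):

    use std::time::Duration;

    // `process_forwards` stands in for ChannelManager::process_pending_htlc_forwards;
    // wiring up a real ChannelManager is out of scope for this sketch.
    fn handle_pending_forwards(time_forwardable: Duration, process_forwards: impl FnOnce()) {
        // A real node would schedule this on a timer instead of blocking the
        // event-processing thread.
        std::thread::sleep(time_forwardable);
        process_forwards();
    }

    fn main() {
        handle_pending_forwards(Duration::from_secs(2), || {
            println!("processing queued HTLC forwards");
        });
    }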
                let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
                let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
  
                                if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
                                        log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
                                        let mut claimable_amt_msat = 0;
 +                                      let mut receiver_node_id = Some(our_network_pubkey);
 +                                      let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
 +                                      if phantom_shared_secret.is_some() {
 +                                              let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode)
 +                                                      .expect("Failed to get node_id for phantom node recipient");
 +                                              receiver_node_id = Some(phantom_pubkey)
 +                                      }
                                        for claimable_htlc in claimable_htlcs {
                                                claimable_amt_msat += claimable_htlc.value;
  
                                                }
                                        }
                                        pending_events_read.push(events::Event::PaymentClaimed {
 +                                              receiver_node_id,
                                                payment_hash,
                                                purpose: payment_purpose,
                                                amount_msat: claimable_amt_msat,
  
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
 -                              claimable_htlcs,
                                pending_msg_events: Vec::new(),
                        }),
                        inbound_payment_key: expanded_inbound_key,
                        pending_inbound_payments: Mutex::new(pending_inbound_payments),
                        pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
 +                      pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
  
                        forward_htlcs: Mutex::new(forward_htlcs),
 +                      claimable_htlcs: Mutex::new(claimable_htlcs),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        id_to_peer: Mutex::new(id_to_peer),
                        short_to_chan_info: FairRwLock::new(short_to_chan_info),
                for htlc_source in failed_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 -                      channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
 +                      let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 +                      channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
  
                //TODO: Broadcast channel update for closed channels, but only after we've made a
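The failure code built via HTLCFailReason::from_failure_code above is a BOLT 4 composite: the PERM flag (0x4000) OR'd with 8, i.e. permanent_channel_failure, which tells the origin the channel is gone for good. The flag arithmetic, as a quick check:

    // BOLT 4 failure-code flag; PERM | 8 is `permanent_channel_failure`.
    const PERM: u16 = 0x4000;

    fn main() {
        let code = PERM | 8;
        assert_eq!(code, 0x4008);
        println!("failure code: 0x{:04x}", code);
    }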
@@@ -8557,7 -8380,7 +8606,7 @@@ pub mod bench 
                                $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
  
                                expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
 -                              expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
 +                              expect_payment_claimable!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
                                $node_b.claim_funds(payment_preimage);
                                expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
  
index 5ec5d2b350a06ef79810df65895f689e9d4485f2,bc7f627758bf58a3591db58afd96640578de0015..4715b98c4b4fd4341c75880da79a472dc35d3e0f
@@@ -107,14 -107,6 +107,14 @@@ pub enum ConnectStyle 
        /// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
        /// make a single `best_block_updated` call.
        TransactionsFirstSkippingBlocks,
 +      /// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
 +      /// make a single `best_block_updated` call. Further, we call `transactions_confirmed` a second
 +      /// time for each block's transactions to ensure it's idempotent.
 +      TransactionsDuplicativelyFirstSkippingBlocks,
 +      /// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
 +      /// make a single `best_block_updated` call. Further, before each connection we re-confirm the
 +      /// transactions from every block we've ever connected, ensuring `transactions_confirmed` is
 +      /// idempotent even across the entire block history.
 +      HighlyRedundantTransactionsFirstSkippingBlocks,
        /// The same as `TransactionsFirst` when connecting blocks. During disconnection only
        /// `transaction_unconfirmed` is called.
        TransactionsFirstReorgsOnlyTip,
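Both new styles stress the same contract: transactions_confirmed must be idempotent, so a sync backend re-reporting transactions it already confirmed is harmless. A minimal sketch against LDK's chain::Confirm trait (the helper itself is hypothetical):

    use bitcoin::{BlockHeader, Transaction};
    use lightning::chain::Confirm;

    // Hypothetical helper mirroring what TransactionsDuplicativelyFirstSkippingBlocks
    // exercises: a correct Confirm implementation treats the duplicate call as a no-op.
    fn confirm_redundantly<C: Confirm>(
        target: &C, header: &BlockHeader, txdata: &[(usize, &Transaction)], height: u32,
    ) {
        target.transactions_confirmed(header, txdata, height);
        target.transactions_confirmed(header, txdata, height); // must be idempotent
    }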
@@@ -129,16 -121,14 +129,16 @@@ impl ConnectStyle 
                        use core::hash::{BuildHasher, Hasher};
                        // Get a random value using the only std API to do so - the DefaultHasher
                        let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
 -                      let res = match rand_val % 7 {
 +                      let res = match rand_val % 9 {
                                0 => ConnectStyle::BestBlockFirst,
                                1 => ConnectStyle::BestBlockFirstSkippingBlocks,
                                2 => ConnectStyle::BestBlockFirstReorgsOnlyTip,
                                3 => ConnectStyle::TransactionsFirst,
                                4 => ConnectStyle::TransactionsFirstSkippingBlocks,
 -                              5 => ConnectStyle::TransactionsFirstReorgsOnlyTip,
 -                              6 => ConnectStyle::FullBlockViaListen,
 +                              5 => ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks,
 +                              6 => ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks,
 +                              7 => ConnectStyle::TransactionsFirstReorgsOnlyTip,
 +                              8 => ConnectStyle::FullBlockViaListen,
                                _ => unreachable!(),
                        };
                        eprintln!("Using Block Connection Style: {:?}", res);
  pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
        let skip_intermediaries = match *node.connect_style.borrow() {
                ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks|
 +                      ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks|ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks|
                        ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::TransactionsFirstReorgsOnlyTip => true,
                _ => false,
        };
@@@ -204,32 -193,8 +204,32 @@@ fn do_connect_block<'a, 'b, 'c, 'd>(nod
                                node.node.best_block_updated(&block.header, height);
                                node.node.transactions_confirmed(&block.header, &txdata, height);
                        },
 -                      ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks|ConnectStyle::TransactionsFirstReorgsOnlyTip => {
 +                      ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks|
 +                      ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks|ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks|
 +                      ConnectStyle::TransactionsFirstReorgsOnlyTip => {
 +                              if *node.connect_style.borrow() == ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks {
 +                                      let mut connections = Vec::new();
 +                                      for (block, height) in node.blocks.lock().unwrap().iter() {
 +                                              if !block.txdata.is_empty() {
 +                                                      // Reconnect all transactions we've ever seen to ensure transaction connection
 +                                                      // is *really* idempotent. This is a fairly likely access pattern for
 +                                                      // esplora-based chain sync implementations, which try to keep state and
 +                                                      // complexity to a minimum.
 +                                                      //
 +                                                      // Sadly we have to clone the block here to maintain lockorder. In the
 +                                                      // future we should consider Arc'ing the blocks to avoid this.
 +                                                      connections.push((block.clone(), *height));
 +                                              }
 +                                      }
 +                                      for (old_block, height) in connections {
 +                                              node.chain_monitor.chain_monitor.transactions_confirmed(&old_block.header,
 +                                                      &old_block.txdata.iter().enumerate().collect::<Vec<_>>(), height);
 +                                      }
 +                              }
                                node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
 +                              if *node.connect_style.borrow() == ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks {
 +                                      node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
 +                              }
                                call_claimable_balances(node);
                                node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
                                node.node.transactions_confirmed(&block.header, &txdata, height);
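The block clone in the highly-redundant path exists only to keep lock order sane, and the in-code TODO suggests Arc'ing the stored blocks instead. A hypothetical sketch of that refactor:

    use std::sync::{Arc, Mutex};
    use bitcoin::Block;

    // Hypothetical storage: Arc'd blocks make snapshotting the history cheap, since
    // cloning an Arc bumps a refcount rather than deep-copying each Block.
    struct BlockHistory {
        blocks: Mutex<Vec<(Arc<Block>, u32)>>,
    }

    impl BlockHistory {
        fn snapshot(&self) -> Vec<(Arc<Block>, u32)> {
            // The lock is held only for the shallow clone.
            self.blocks.lock().unwrap().clone()
        }
    }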
@@@ -261,8 -226,7 +261,8 @@@ pub fn disconnect_blocks<'a, 'b, 'c, 'd
                                node.chain_monitor.chain_monitor.block_disconnected(&orig.0.header, orig.1);
                                Listen::block_disconnected(node.node, &orig.0.header, orig.1);
                        },
 -                      ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => {
 +                      ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks|
 +                      ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks|ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks => {
                                if i == count - 1 {
                                        node.chain_monitor.chain_monitor.best_block_updated(&prev.0.header, prev.1);
                                        node.node.best_block_updated(&prev.0.header, prev.1);
@@@ -1127,7 -1091,7 +1127,7 @@@ macro_rules! check_closed_event 
                use $crate::util::events::Event;
  
                let events = $node.node.get_and_clear_pending_events();
-               assert_eq!(events.len(), $events);
+               assert_eq!(events.len(), $events, "{:?}", events);
                let expected_reason = $reason;
                let mut issues_discard_funding = false;
                for event in events {
@@@ -1386,7 -1350,7 +1386,7 @@@ macro_rules! expect_pending_htlcs_forwa
                let events = $node.node.get_and_clear_pending_events();
                match events[0] {
                        $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
-                       _ => panic!("Unexpected event"),
+                       _ => panic!("Unexpected event {:?}", events),
                };
  
                let count = expected_failures.len() + 1;
@@@ -1469,20 -1433,20 +1469,20 @@@ macro_rules! expect_pending_htlcs_forwa
                }
        }}
  }
 -
  #[macro_export]
  #[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
 -macro_rules! expect_payment_received {
 +macro_rules! expect_payment_claimable {
        ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
 -              expect_payment_received!($node, $expected_payment_hash, $expected_payment_secret, $expected_recv_value, None)
 +              expect_payment_claimable!($node, $expected_payment_hash, $expected_payment_secret, $expected_recv_value, None, $node.node.get_our_node_id())
        };
 -      ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr, $expected_payment_preimage: expr) => {
 +      ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr, $expected_payment_preimage: expr, $expected_receiver_node_id: expr) => {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
 -                      $crate::util::events::Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
 +                      $crate::util::events::Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id: _, via_user_channel_id: _ } => {
                                assert_eq!($expected_payment_hash, *payment_hash);
                                assert_eq!($expected_recv_value, amount_msat);
 +                              assert_eq!($expected_receiver_node_id, receiver_node_id.unwrap());
                                match purpose {
                                        $crate::util::events::PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                                assert_eq!(&$expected_payment_preimage, payment_preimage);
@@@ -1596,7 -1560,7 +1596,7 @@@ macro_rules! expect_payment_forwarded 
                                if !$downstream_force_closed {
                                        assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
                                }
-                               assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+                               assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
                        },
                        _ => panic!("Unexpected event"),
                }
@@@ -1774,9 -1738,8 +1774,9 @@@ pub fn do_pass_along_path<'a, 'b, 'c>(o
                        if payment_received_expected {
                                assert_eq!(events_2.len(), 1);
                                match events_2[0] {
 -                                      Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
 +                                      Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_id, ref via_user_channel_id } => {
                                                assert_eq!(our_payment_hash, *payment_hash);
 +                                              assert_eq!(node.node.get_our_node_id(), receiver_node_id.unwrap());
                                                match &purpose {
                                                        PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                                                assert_eq!(expected_preimage, *payment_preimage);
                                                        },
                                                }
                                                assert_eq!(amount_msat, recv_value);
 +                                              assert!(node.node.list_channels().iter().any(|details| details.channel_id == via_channel_id.unwrap()));
 +                                              assert!(node.node.list_channels().iter().any(|details| details.user_channel_id == via_user_channel_id.unwrap()));
                                        },
                                        _ => panic!("Unexpected event"),
                                }
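The new assertions check that via_channel_id/via_user_channel_id on PaymentClaimable point at a channel the node actually has. A hedged consumer-side sketch of the same lookup (field and type names simplified; the real ChannelDetails carries many more fields):

    // Simplified stand-in for the relevant ChannelDetails fields.
    struct ChannelDetails { channel_id: [u8; 32] }

    // Find the receiving channel for a claimable payment, as the assertions above
    // do via list_channels().
    fn find_receiving_channel<'a>(
        channels: &'a [ChannelDetails], via_channel_id: Option<[u8; 32]>,
    ) -> Option<&'a ChannelDetails> {
        let id = via_channel_id?;
        channels.iter().find(|c| c.channel_id == id)
    }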
@@@ -1817,7 -1778,7 +1817,7 @@@ pub fn pass_along_route<'a, 'b, 'c>(ori
        assert_eq!(events.len(), expected_route.len());
        for (path_idx, (ev, expected_path)) in events.drain(..).zip(expected_route.iter()).enumerate() {
                // Once we've gotten through all the HTLCs, the last one should result in a
 -              // PaymentReceived (but each previous one should not!), .
 +              // PaymentClaimable (but each previous one should not!).
                let expect_payment = path_idx == expected_route.len() - 1;
                pass_along_path(origin_node, expected_path, recv_value, our_payment_hash.clone(), Some(our_payment_secret), ev, expect_payment, None);
        }
@@@ -2452,14 -2413,6 +2452,14 @@@ macro_rules! handle_chan_reestablish_ms
                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                        }
  
 +                      let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both
 +                      if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
 +                              assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 +                              idx += 1;
 +                              assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
 +                              had_channel_update = true;
 +                      }
 +
                        let mut revoke_and_ack = None;
                        let mut commitment_update = None;
                        let order = if let Some(ev) = msg_events.get(idx) {
                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                idx += 1;
                                assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
 +                              assert!(!had_channel_update);
                        }
  
                        assert_eq!(msg_events.len(), idx);
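The `flags & 2` assertions test BOLT 7's channel_update disable bit (bit 1 of the flags byte), which must be unset immediately after reconnection. As a tiny sketch:

    // BOLT 7 channel_update flags: bit 1 (value 2) is the `disable` bit.
    fn is_disabled(flags: u8) -> bool {
        flags & 2 != 0
    }

    fn main() {
        assert!(!is_disabled(0)); // freshly reconnected channel must be enabled
        assert!(is_disabled(2)); // disable bit set
    }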
index 92957040dc1adbc4fea923a4f1db32f83f8860ee,88185315148bc83ce3466b72a37ace7a5912913f..2c7e8e72bbee663fd824a7f3aad5a27b1c118792
  //! Functional tests which test for correct behavior across node restarts.
  
  use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
  use crate::chain::channelmonitor::ChannelMonitor;
+ use crate::chain::keysinterface::KeysInterface;
  use crate::chain::transaction::OutPoint;
  use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
  use crate::ln::msgs;
  use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
  use crate::util::enforcing_trait_impls::EnforcingSigner;
  use crate::util::test_utils;
- use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+ use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
  use crate::util::ser::{Writeable, ReadableArgs};
  use crate::util::config::UserConfig;
  
@@@ -635,7 -637,7 +637,7 @@@ fn test_forwardable_regen() 
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
  
        expect_pending_htlcs_forwardable!(nodes[1]);
 -      expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
 +      expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 100_000);
        check_added_monitors!(nodes[1], 1);
  
        let mut events = nodes[1].node.get_and_clear_pending_msg_events();
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
        expect_pending_htlcs_forwardable!(nodes[2]);
 -      expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
 +      expect_payment_claimable!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
  
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
@@@ -654,7 -656,7 +656,7 @@@ fn do_test_partial_claim_before_restart
        // Test what happens if a node receives an MPP payment, claims it, but crashes before
        // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
        // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
 -      // have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the
 +      // have the PaymentClaimable event, (b) have one (or two) channel(s) that go on chain with the
        // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
        // not have the preimage tied to the still-pending HTLC.
        //
        nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 2);
  
 -      // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
 +      // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
        let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(send_events.len(), 2);
        do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
  
        let original_manager = nodes[3].node.encode();
  
 -      expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
 +      expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000);
  
        nodes[3].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[3], 2);
        nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
  
        // During deserialization, we should have closed one channel and broadcast its latest
 -      // commitment transaction. We should also still have the original PaymentReceived event we
 +      // commitment transaction. We should also still have the original PaymentClaimable event we
        // never finished processing.
        let events = nodes[3].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
 -      if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
 +      if let Event::PaymentClaimable { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
        if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
        if persist_both_monitors {
                if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
                let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
                check_added_monitors!(nodes[3], 1);
                assert_eq!(ds_msgs.len(), 2);
 -              if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
 +              if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
  
 -              let cs_updates = match ds_msgs[0] {
 +              let cs_updates = match ds_msgs[1] {
                        MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
                                nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
                                check_added_monitors!(nodes[2], 1);
@@@ -811,3 -813,192 +813,192 @@@ fn test_partial_claim_before_restart() 
        do_test_partial_claim_before_restart(false);
        do_test_partial_claim_before_restart(true);
  }
+
+ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool) {
+       if !use_cs_commitment { assert!(!claim_htlc); }
+       // If we go to forward a payment, and the ChannelMonitor persistence completes, but the
+       // ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail
+       // it back until the ChannelMonitor decides the fate of the HTLC.
+       // This was never an issue, but it may be easy to regress here going forward.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let persister;
+       let new_chain_monitor;
+       let nodes_1_deserialized;
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+       let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+       let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let payment_event = SendEvent::from_node(&nodes[0]);
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+       let node_encoded = nodes[1].node.encode();
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       let payment_event = SendEvent::from_node(&nodes[1]);
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+       nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
+       check_added_monitors!(nodes[2], 1);
+       if claim_htlc {
+               get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
+                       &nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
+       }
+       assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       let _ = nodes[2].node.get_and_clear_pending_msg_events();
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+       let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
+       check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+       check_closed_broadcast!(nodes[2], true);
+       let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+       let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+       reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+       check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+       let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(bs_commitment_tx.len(), 1);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       if use_cs_commitment {
+               // If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
+               // for an HTLC-spending transaction before it does anything with the HTLC upstream.
+               confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+               if claim_htlc {
+                       confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
+               } else {
+                       connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+                       let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+                       assert_eq!(bs_htlc_timeout_tx.len(), 1);
+                       confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+               }
+       } else {
+               confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
+       }
+       if !claim_htlc {
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+       } else {
+               expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
+       }
+       check_added_monitors!(nodes[1], 1);
+       let events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match &events[0] {
+               MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
+                       if claim_htlc {
+                               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+                       } else {
+                               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+                       }
+                       commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       if claim_htlc {
+               expect_payment_sent!(nodes[0], payment_preimage);
+       } else {
+               expect_payment_failed!(nodes[0], payment_hash, false);
+       }
+ }
+
+ #[test]
+ fn forwarded_payment_no_manager_persistence() {
+       do_forwarded_payment_no_manager_persistence(true, true);
+       do_forwarded_payment_no_manager_persistence(true, false);
+       do_forwarded_payment_no_manager_persistence(false, false);
+ }
+
+ #[test]
+ fn removed_payment_no_manager_persistence() {
+       // If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but
+       // the corresponding ChannelManager persistence does not, we need to ensure that the HTLC is
+       // still failed back to the previous hop even though the ChannelMonitor now no longer is aware
+       // of the HTLC. This was previously broken as no attempt was made to figure out which HTLCs
+       // were left dangling when a channel was force-closed due to a stale ChannelManager.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let persister;
+       let new_chain_monitor;
+       let nodes_1_deserialized;
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+       let node_encoded = nodes[1].node.encode();
+       nodes[2].node.fail_htlc_backwards(&payment_hash);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
+       check_added_monitors!(nodes[2], 1);
+       let events = nodes[2].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match &events[0] {
+               MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+                       nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_htlcs[0]);
+                       commitment_signed_dance!(nodes[1], nodes[2], commitment_signed, false);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+       let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+       reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+       match nodes[1].node.pop_pending_event().unwrap() {
+               Event::ChannelClosed { ref reason, .. } => {
+                       assert_eq!(*reason, ClosureReason::OutdatedChannelManager);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       // Now that the ChannelManager has force-closed the channel which had the HTLC removed, the
+       // HTLC is forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
+       // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+       check_added_monitors!(nodes[1], 1);
+       let events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match &events[0] {
+               MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+                       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+                       commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       expect_payment_failed!(nodes[0], payment_hash, false);
+ }