Merge pull request #1403 from jurvis/jurvis/add-paymentforwardingfailed-event
author Jeffrey Czyz <jkczyz@gmail.com>
Tue, 26 Jul 2022 00:23:53 +0000 (19:23 -0500)
committer GitHub <noreply@github.com>
Tue, 26 Jul 2022 00:23:53 +0000 (19:23 -0500)
Add HTLCHandlingFailed event

fuzz/src/chanmon_consistency.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/util/test_utils.rs

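For context, this merge threads a new `HTLCDestination` through the HTLC failure paths and surfaces it to event consumers as `Event::HTLCHandlingFailed`. Below is a rough sketch of what a consumer-side handler could look like. It uses hypothetical simplified local stand-ins mirroring the variants added in this diff, not the real `lightning` crate types (which carry more fields and trait impls), and the event field names are taken from this PR but should be treated as assumptions:

```rust
// Hypothetical, simplified stand-ins mirroring this diff; not the real
// lightning::util::events types.
type ChannelId = [u8; 32];
type PublicKey = [u8; 33];

struct PaymentHash([u8; 32]);

enum HTLCDestination {
    // Forwarding to the next hop over one of our channels failed.
    NextHopChannel { node_id: Option<PublicKey>, channel_id: ChannelId },
    // The requested outgoing SCID did not correspond to a usable channel.
    UnknownNextHop { requested_forward_scid: u64 },
    // We were the final destination and failed the inbound HTLC ourselves.
    FailedPayment { payment_hash: PaymentHash },
}

enum Event {
    HTLCHandlingFailed { prev_channel_id: ChannelId, failed_next_destination: HTLCDestination },
    Other, // stand-in for the many other event variants
}

fn handle_event(event: Event) {
    match event {
        Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } => {
            match failed_next_destination {
                HTLCDestination::NextHopChannel { node_id, channel_id } => {
                    // A forward out of `prev_channel_id` could not be sent over
                    // `channel_id`; `node_id` may be None when the counterparty
                    // is not known (the diff stores it as an Option throughout).
                    let _ = (prev_channel_id, node_id, channel_id);
                }
                HTLCDestination::UnknownNextHop { requested_forward_scid } => {
                    let _ = requested_forward_scid;
                }
                HTLCDestination::FailedPayment { payment_hash } => {
                    let _ = payment_hash.0;
                }
            }
        }
        Event::Other => {}
    }
}
```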
index 71c1ebc330469474c5b3e4083ea5b207b63437de,dc9504878f6d3f07fcb2c137579b8dd092688184..f384d21927e91baf377a292e0a7c043ec904d366
@@@ -140,7 -140,7 +140,7 @@@ impl chain::Watch<EnforcingSigner> for 
                };
                let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
                        read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
 -              deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
 +              deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
                let mut ser = VecWriter(Vec::new());
                deserialized_monitor.write(&mut ser).unwrap();
                map_entry.insert((update.update_id, ser.0));
                self.chain_monitor.update_channel(funding_txo, update)
        }
  
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
  }
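The signature change above widens the tuple returned by `chain::Watch::release_pending_monitor_events`. A minimal sketch of the widened shape, using hypothetical simplified local types (`OutPoint`, `MonitorEvent`, and `PublicKey` here are stand-ins, not the real imports):

```rust
// Simplified stand-in types for illustration only.
struct OutPoint { txid: [u8; 32], index: u16 }
enum MonitorEvent { Placeholder }
type PublicKey = [u8; 33];

trait Watch {
    // Each batch of monitor events is now tagged with both the funding
    // outpoint and, when known, the counterparty's node id, so the caller
    // can attribute failures to a specific peer and channel.
    fn release_pending_monitor_events(&self)
        -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
}
```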
@@@ -860,6 -860,7 +860,7 @@@ pub fn do_test<Out: Output>(data: &[u8]
                                                events::Event::PendingHTLCsForwardable { .. } => {
                                                        nodes[$node].process_pending_htlc_forwards();
                                                },
+                                               events::Event::HTLCHandlingFailed { .. } => {},
                                                _ => if out.may_fail.load(atomic::Ordering::Acquire) {
                                                        return;
                                                } else {
index 239affc73dd74e44514458f726940e77c0d39fcc,eff81f1484a4ed073db3c99bf5c38bf1596afdd6..5c4ede0b16161819a8c7b54f4b8696cb5274912a
@@@ -43,6 -43,7 +43,7 @@@ use prelude::*
  use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
  use core::ops::Deref;
  use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+ use bitcoin::secp256k1::PublicKey;
  
  #[derive(Clone, Copy, Hash, PartialEq, Eq)]
  /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
@@@ -235,7 -236,7 +236,7 @@@ pub struct ChainMonitor<ChannelSigner: 
        persister: P,
        /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
        /// from the user and not from a [`ChannelMonitor`].
-       pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
+       pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
        /// The best block height seen, used as a proxy for the passage of time.
        highest_chain_height: AtomicUsize,
  }
@@@ -299,7 -300,7 +300,7 @@@ where C::Target: chain::Filter
                                                        log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
                                                Err(ChannelMonitorUpdateErr::PermanentFailure) => {
                                                        monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)]));
+                                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
                                                },
                                                Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
                                                        log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
                                self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                                        funding_txo,
                                        monitor_update_id: monitor_data.monitor.get_latest_update_id(),
-                               }]));
+                               }], monitor_data.monitor.get_counterparty_node_id()));
                        },
                        MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
                                if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
        /// channel_monitor_updated once with the highest ID.
        #[cfg(any(test, fuzzing))]
        pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
+               let monitors = self.monitors.read().unwrap();
+               let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
                self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
                        funding_txo,
                        monitor_update_id,
-               }]));
+               }], counterparty_node_id));
        }
  
        #[cfg(any(test, fuzzing, feature = "_test_utils"))]
@@@ -636,7 -639,7 +639,7 @@@ where C::Target: chain::Filter
                        Some(monitor_state) => {
                                let monitor = &monitor_state.monitor;
                                log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
 -                              let update_res = monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger);
 +                              let update_res = monitor.update_monitor(&update, &self.broadcaster, &*self.fee_estimator, &self.logger);
                                if update_res.is_err() {
                                        log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
                                }
                }
        }
  
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
                for monitor_state in self.monitors.read().unwrap().values() {
                        let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
                                let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
                                if monitor_events.len() > 0 {
                                        let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
-                                       pending_monitor_events.push((monitor_outpoint, monitor_events));
+                                       let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
+                                       pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
                                }
                        }
                }
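The lookup added in `force_channel_monitor_updated` above uses `Option::and_then` to chain two fallible steps. A minimal sketch of that pattern, with hypothetical simplified types (a `u64` key stands in for `OutPoint`):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

// Simplified stand-ins for illustration; not the real chainmonitor types.
type PublicKey = [u8; 33];

struct MonitorHolder { counterparty_node_id: Option<PublicKey> }
impl MonitorHolder {
    fn get_counterparty_node_id(&self) -> Option<PublicKey> { self.counterparty_node_id }
}

struct ChainMonitor { monitors: RwLock<HashMap<u64, MonitorHolder>> }

impl ChainMonitor {
    fn counterparty_for(&self, funding_txo: u64) -> Option<PublicKey> {
        // and_then flattens two layers of Option: the monitor may be missing
        // entirely, and an existing monitor may not know its counterparty.
        self.monitors.read().unwrap()
            .get(&funding_txo)
            .and_then(|m| m.get_counterparty_node_id())
    }
}
```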
index 8a6f02452ec1abb6c61812ac4bc1974e54a07df2,220951e07f99d19eea145cbb497445658268046d..38dcd6cf0e70daf2457800fc8fc3d7ee68d16247
@@@ -965,13 -965,6 +965,13 @@@ impl<Signer: Sign> Writeable for Channe
  }
  
  impl<Signer: Sign> ChannelMonitor<Signer> {
 +      /// For lockorder enforcement purposes, we need a single site which constructs the
 +      /// `inner` mutex; otherwise, in cases where we lock two monitors at the same time (eg in
 +      /// our PartialEq implementation), we may falsely decide a lockorder violation has occurred.
 +      fn from_impl(imp: ChannelMonitorImpl<Signer>) -> Self {
 +              ChannelMonitor { inner: Mutex::new(imp) }
 +      }
 +
        pub(crate) fn new(secp_ctx: Secp256k1<secp256k1::All>, keys: Signer, shutdown_script: Option<Script>,
                          on_counterparty_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script),
                          channel_parameters: &ChannelTransactionParameters,
                let mut outputs_to_watch = HashMap::new();
                outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]);
  
 -              ChannelMonitor {
 -                      inner: Mutex::new(ChannelMonitorImpl {
 -                              latest_update_id: 0,
 -                              commitment_transaction_number_obscure_factor,
 +              Self::from_impl(ChannelMonitorImpl {
 +                      latest_update_id: 0,
 +                      commitment_transaction_number_obscure_factor,
  
 -                              destination_script: destination_script.clone(),
 -                              broadcasted_holder_revokable_script: None,
 -                              counterparty_payment_script,
 -                              shutdown_script,
 +                      destination_script: destination_script.clone(),
 +                      broadcasted_holder_revokable_script: None,
 +                      counterparty_payment_script,
 +                      shutdown_script,
  
 -                              channel_keys_id,
 -                              holder_revocation_basepoint,
 -                              funding_info,
 -                              current_counterparty_commitment_txid: None,
 -                              prev_counterparty_commitment_txid: None,
 +                      channel_keys_id,
 +                      holder_revocation_basepoint,
 +                      funding_info,
 +                      current_counterparty_commitment_txid: None,
 +                      prev_counterparty_commitment_txid: None,
  
 -                              counterparty_commitment_params,
 -                              funding_redeemscript,
 -                              channel_value_satoshis,
 -                              their_cur_per_commitment_points: None,
 +                      counterparty_commitment_params,
 +                      funding_redeemscript,
 +                      channel_value_satoshis,
 +                      their_cur_per_commitment_points: None,
  
 -                              on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
 +                      on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
  
 -                              commitment_secrets: CounterpartyCommitmentSecrets::new(),
 -                              counterparty_claimable_outpoints: HashMap::new(),
 -                              counterparty_commitment_txn_on_chain: HashMap::new(),
 -                              counterparty_hash_commitment_number: HashMap::new(),
 +                      commitment_secrets: CounterpartyCommitmentSecrets::new(),
 +                      counterparty_claimable_outpoints: HashMap::new(),
 +                      counterparty_commitment_txn_on_chain: HashMap::new(),
 +                      counterparty_hash_commitment_number: HashMap::new(),
  
 -                              prev_holder_signed_commitment_tx: None,
 -                              current_holder_commitment_tx: holder_commitment_tx,
 -                              current_counterparty_commitment_number: 1 << 48,
 -                              current_holder_commitment_number,
 +                      prev_holder_signed_commitment_tx: None,
 +                      current_holder_commitment_tx: holder_commitment_tx,
 +                      current_counterparty_commitment_number: 1 << 48,
 +                      current_holder_commitment_number,
  
 -                              payment_preimages: HashMap::new(),
 -                              pending_monitor_events: Vec::new(),
 -                              pending_events: Vec::new(),
 +                      payment_preimages: HashMap::new(),
 +                      pending_monitor_events: Vec::new(),
 +                      pending_events: Vec::new(),
  
 -                              onchain_events_awaiting_threshold_conf: Vec::new(),
 -                              outputs_to_watch,
 +                      onchain_events_awaiting_threshold_conf: Vec::new(),
 +                      outputs_to_watch,
  
 -                              onchain_tx_handler,
 +                      onchain_tx_handler,
  
 -                              lockdown_from_offchain: false,
 -                              holder_tx_signed: false,
 -                              funding_spend_seen: false,
 -                              funding_spend_confirmed: None,
 -                              htlcs_resolved_on_chain: Vec::new(),
 +                      lockdown_from_offchain: false,
 +                      holder_tx_signed: false,
 +                      funding_spend_seen: false,
 +                      funding_spend_confirmed: None,
 +                      htlcs_resolved_on_chain: Vec::new(),
  
 -                              best_block,
 -                              counterparty_node_id: Some(counterparty_node_id),
 +                      best_block,
 +                      counterparty_node_id: Some(counterparty_node_id),
  
 -                              secp_ctx,
 -                      }),
 -              }
 +                      secp_ctx,
 +              })
        }
  
        #[cfg(test)]
                &self,
                updates: &ChannelMonitorUpdate,
                broadcaster: &B,
 -              fee_estimator: &F,
 +              fee_estimator: F,
                logger: &L,
        ) -> Result<(), ()>
        where
                self.inner.lock().unwrap().get_cur_holder_commitment_number()
        }
  
+       pub(crate) fn get_counterparty_node_id(&self) -> Option<PublicKey> {
+               self.inner.lock().unwrap().counterparty_node_id
+       }
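A hedged sketch of the accessor just added: the stored id is an `Option`, presumably so that monitors persisted by versions predating the field still deserialize (as `None`). Simplified local types, not the real ones:

```rust
use std::sync::Mutex;

// Simplified stand-ins for illustration only.
type PublicKey = [u8; 33];

struct ChannelMonitorImpl { counterparty_node_id: Option<PublicKey> }
struct ChannelMonitor { inner: Mutex<ChannelMonitorImpl> }

impl ChannelMonitor {
    fn get_counterparty_node_id(&self) -> Option<PublicKey> {
        // Copy the small Option out and release the lock immediately.
        self.inner.lock().unwrap().counterparty_node_id
    }
}
```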
        /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of
        /// the Channel was out-of-date. You may use it to get a broadcastable holder toxic tx in case
        /// we have fallen behind, i.e. when receiving a channel_reestablish with a proof that our counterparty side knows
@@@ -1949,10 -1948,10 +1953,10 @@@ impl<Signer: Sign> ChannelMonitorImpl<S
                self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
        }
  
 -      pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
 +      pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: F, logger: &L) -> Result<(), ()>
        where B::Target: BroadcasterInterface,
 -                  F::Target: FeeEstimator,
 -                  L::Target: Logger,
 +              F::Target: FeeEstimator,
 +              L::Target: Logger,
        {
                log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
                        log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
                                },
                                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
                                        log_trace!(logger, "Updating ChannelMonitor with payment preimage");
 -                                      let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
 +                                      let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
                                        self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, &bounded_fee_estimator, logger)
                                },
                                ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
@@@ -3366,58 -3365,60 +3370,58 @@@ impl<'a, Signer: Sign, K: KeysInterface
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
  
 -              Ok((best_block.block_hash(), ChannelMonitor {
 -                      inner: Mutex::new(ChannelMonitorImpl {
 -                              latest_update_id,
 -                              commitment_transaction_number_obscure_factor,
 +              Ok((best_block.block_hash(), ChannelMonitor::from_impl(ChannelMonitorImpl {
 +                      latest_update_id,
 +                      commitment_transaction_number_obscure_factor,
  
 -                              destination_script,
 -                              broadcasted_holder_revokable_script,
 -                              counterparty_payment_script,
 -                              shutdown_script,
 +                      destination_script,
 +                      broadcasted_holder_revokable_script,
 +                      counterparty_payment_script,
 +                      shutdown_script,
  
 -                              channel_keys_id,
 -                              holder_revocation_basepoint,
 -                              funding_info,
 -                              current_counterparty_commitment_txid,
 -                              prev_counterparty_commitment_txid,
 +                      channel_keys_id,
 +                      holder_revocation_basepoint,
 +                      funding_info,
 +                      current_counterparty_commitment_txid,
 +                      prev_counterparty_commitment_txid,
  
 -                              counterparty_commitment_params,
 -                              funding_redeemscript,
 -                              channel_value_satoshis,
 -                              their_cur_per_commitment_points,
 +                      counterparty_commitment_params,
 +                      funding_redeemscript,
 +                      channel_value_satoshis,
 +                      their_cur_per_commitment_points,
  
 -                              on_holder_tx_csv,
 +                      on_holder_tx_csv,
  
 -                              commitment_secrets,
 -                              counterparty_claimable_outpoints,
 -                              counterparty_commitment_txn_on_chain,
 -                              counterparty_hash_commitment_number,
 +                      commitment_secrets,
 +                      counterparty_claimable_outpoints,
 +                      counterparty_commitment_txn_on_chain,
 +                      counterparty_hash_commitment_number,
  
 -                              prev_holder_signed_commitment_tx,
 -                              current_holder_commitment_tx,
 -                              current_counterparty_commitment_number,
 -                              current_holder_commitment_number,
 +                      prev_holder_signed_commitment_tx,
 +                      current_holder_commitment_tx,
 +                      current_counterparty_commitment_number,
 +                      current_holder_commitment_number,
  
 -                              payment_preimages,
 -                              pending_monitor_events: pending_monitor_events.unwrap(),
 -                              pending_events,
 +                      payment_preimages,
 +                      pending_monitor_events: pending_monitor_events.unwrap(),
 +                      pending_events,
  
 -                              onchain_events_awaiting_threshold_conf,
 -                              outputs_to_watch,
 +                      onchain_events_awaiting_threshold_conf,
 +                      outputs_to_watch,
  
 -                              onchain_tx_handler,
 +                      onchain_tx_handler,
  
 -                              lockdown_from_offchain,
 -                              holder_tx_signed,
 -                              funding_spend_seen: funding_spend_seen.unwrap(),
 -                              funding_spend_confirmed,
 -                              htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
 +                      lockdown_from_offchain,
 +                      holder_tx_signed,
 +                      funding_spend_seen: funding_spend_seen.unwrap(),
 +                      funding_spend_confirmed,
 +                      htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
  
 -                              best_block,
 -                              counterparty_node_id,
 +                      best_block,
 +                      counterparty_node_id,
  
 -                              secp_ctx,
 -                      }),
 -              }))
 +                      secp_ctx,
 +              })))
        }
  }
  
@@@ -3537,7 -3538,7 +3541,7 @@@ mod tests 
  
                let broadcaster = TestBroadcaster::new(Arc::clone(&nodes[1].blocks));
                assert!(
 -                      pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
 +                      pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
                        .is_err());
                // Even though we errored on the first update, we should still have generated an HTLC claim
                // transaction
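The `from_impl` refactor running through this file exists for the reason given in its doc comment. A sketch of the single-construction-site pattern, under the assumption (stated in that comment) that the test-time lockorder checker keys locks by construction site, so funneling every inner `Mutex` through one constructor keeps two-monitor locking (e.g. in `PartialEq`) from being misreported as a lockorder violation:

```rust
use std::sync::Mutex;

// Simplified stand-ins for illustration only.
struct Inner { latest_update_id: u64 }
struct Monitor { inner: Mutex<Inner> }

impl Monitor {
    // The one and only place the inner Mutex is constructed.
    fn from_impl(imp: Inner) -> Self {
        Monitor { inner: Mutex::new(imp) }
    }

    fn new() -> Self {
        Self::from_impl(Inner { latest_update_id: 0 })
    }
}
```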
index 972707a0bc3732f8729d29b96bf91165b2223231,ed5bff63d47dd7ce961ebd7cf270e83b150e208e..6da5de04365ee4b1cc20f7501c62605a91d84643
@@@ -917,7 -917,7 +917,7 @@@ impl<Signer: Sign> Channel<Signer> 
                        return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
                }
  
 -              let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 +              let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
  
                let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
                let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors);
                // We generally don't care too much if they set the feerate to something very high, but it
                // could result in the channel being useless due to everything being dust.
                let upper_limit = cmp::max(250 * 25,
 -                      fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
 +                      fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
                if feerate_per_kw as u64 > upper_limit {
                        return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
                }
 -              let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
 +              let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
                // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
                // occasional issues with feerate disagreements between an initiator that wants a feerate
                // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
                // Propose a range from our current Background feerate to our Normal feerate plus our
                // force_close_avoidance_max_fee_satoshis.
                // If we fail to come to consensus, we'll have to force-close.
 -              let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
 -              let normal_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 +              let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
 +              let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
                let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() };
  
                // The spec requires that (when the channel does not have anchors) we only send absolute
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
        /// Also returns the list of payment_hashes for channels which we can safely fail backwards
        /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+       pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
                // Note that we MUST only generate a monitor update that indicates force-closure - we're
                // called during initialization prior to the chain_monitor in the encompassing ChannelManager
                // being fully configured in some cases. Thus, it's likely any monitor events we generate will
                // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
                // return them to fail the payment.
                let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+               let counterparty_node_id = self.get_counterparty_node_id();
                for htlc_update in self.holding_cell_htlc_updates.drain(..) {
                        match htlc_update {
                                HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
-                                       dropped_outbound_htlcs.push((source, payment_hash));
+                                       dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
                                },
                                _ => {}
                        }
@@@ -6572,7 -6573,7 +6573,7 @@@ mod tests 
        use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
        use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};
        use ln::features::{InitFeatures, ChannelTypeFeatures};
 -      use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
 +      use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use ln::script::ShutdownScript;
        use ln::chan_utils;
        use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
                                flags: 0,
                                cltv_expiry_delta: 100,
                                htlc_minimum_msat: 5,
 -                              htlc_maximum_msat: OptionalField::Absent,
 +                              htlc_maximum_msat: MAX_VALUE_MSAT,
                                fee_base_msat: 110,
                                fee_proportional_millionths: 11,
                                excess_data: Vec::new(),
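The merge also migrates feerate call sites from `get_est_sat_per_1000_weight` to `bounded_sat_per_1000_weight`, via the `LowerBoundedFeeEstimator` wrapper seen at the `PaymentPreimage` update step earlier in this diff. A minimal sketch of that wrapper pattern, with simplified local types; the 253 sat/kW floor is an assumption here (it matches the `FuzzEstimator` value at the top of this diff), not a quoted constant:

```rust
// Assumed floor; LDK's actual constant may differ.
const FEERATE_FLOOR_SATS_PER_KW: u32 = 253;

enum ConfirmationTarget { Background, Normal, HighPriority }

// Simplified stand-in trait; the real one takes references/Deref wrappers.
trait FeeEstimator {
    fn get_est_sat_per_1000_weight(&self, target: ConfirmationTarget) -> u32;
}

struct LowerBoundedFeeEstimator<F: FeeEstimator>(F);

impl<F: FeeEstimator> LowerBoundedFeeEstimator<F> {
    fn new(fee_estimator: F) -> Self { LowerBoundedFeeEstimator(fee_estimator) }

    // Call sites use this instead of the raw estimator so a misbehaving
    // estimator can never push feerates below the relayable floor.
    fn bounded_sat_per_1000_weight(&self, target: ConfirmationTarget) -> u32 {
        self.0.get_est_sat_per_1000_weight(target).max(FEERATE_FLOOR_SATS_PER_KW)
    }
}
```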
index 56d705ef71e2ba64ec95a53e6d99dd9dd9eb775d,504ed00ab38b62f0308fa9f3fc64b83b4b4a941b..67c7b58e879cac181b31bd9a42e3775aa84d7ed6
@@@ -48,11 -48,11 +48,11 @@@ use routing::router::{PaymentParameters
  use ln::msgs;
  use ln::msgs::NetAddress;
  use ln::onion_utils;
 -use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
 +use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
  use ln::wire::Encode;
  use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
  use util::config::{UserConfig, ChannelConfig};
- use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+ use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
  use util::{byte_utils, events};
  use util::scid_utils::fake_scid;
  use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
@@@ -281,7 -281,7 +281,7 @@@ enum ClaimFundsFromHop 
        DuplicateClaim,
  }
  
- type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
+ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
  
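A sketch of how the widened `ShutdownResult` tuple feeds the new event plumbing (hypothetical simplified types; cf. the force-close loop below): each dropped HTLC now carries the counterparty node id and channel id needed to build an `HTLCDestination::NextHopChannel` receiver.

```rust
// Simplified stand-ins for illustration only.
type PublicKey = [u8; 33];
type ChannelId = [u8; 32];
struct HTLCSource;
struct PaymentHash([u8; 32]);

enum HTLCDestination {
    NextHopChannel { node_id: Option<PublicKey>, channel_id: ChannelId },
}

fn fail_dropped_htlcs(failed_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>) {
    for (source, payment_hash, counterparty_node_id, channel_id) in failed_htlcs {
        let receiver = HTLCDestination::NextHopChannel {
            node_id: Some(counterparty_node_id),
            channel_id,
        };
        // ...would hand (source, payment_hash, receiver) to the internal
        // fail-backwards path here...
        let _ = (source, payment_hash, receiver);
    }
}
```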
  /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
  /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@@ -1890,7 -1890,8 +1890,8 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                };
  
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
  
                let _ = handle_error!(self, result, *counterparty_node_id);
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
                if let Some((funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
                        flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
                        cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
 -                      htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
 +                      htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
                        fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
                                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
                                                                                routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
                                                                                prev_funding_outpoint } => {
+                                                                                       macro_rules! failure_handler {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+                                                                                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                                                               short_channel_id: prev_short_channel_id,
+                                                                                                               outpoint: prev_funding_outpoint,
+                                                                                                               htlc_id: prev_htlc_id,
+                                                                                                               incoming_packet_shared_secret: incoming_shared_secret,
+                                                                                                               phantom_shared_secret: $phantom_ss,
+                                                                                                       });
+                                                                                                       let reason = if $next_hop_unknown {
+                                                                                                               HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+                                                                                                       } else {
+                                                                                                               HTLCDestination::FailedPayment{ payment_hash }
+                                                                                                       };
+                                                                                                       failed_forwards.push((htlc_source, payment_hash,
+                                                                                                               HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+                                                                                                               reason
+                                                                                                       ));
+                                                                                                       continue;
+                                                                                               }
+                                                                                       }
                                                                                        macro_rules! fail_forward {
                                                                                                ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
                                                                                                        {
-                                                                                                               log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                                                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                                                                       short_channel_id: prev_short_channel_id,
-                                                                                                                       outpoint: prev_funding_outpoint,
-                                                                                                                       htlc_id: prev_htlc_id,
-                                                                                                                       incoming_packet_shared_secret: incoming_shared_secret,
-                                                                                                                       phantom_shared_secret: $phantom_ss,
-                                                                                                               });
-                                                                                                               failed_forwards.push((htlc_source, payment_hash,
-                                                                                                                       HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
-                                                                                                               ));
-                                                                                                               continue;
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
+                                                                                                       }
+                                                                                               }
+                                                                                       }
+                                                                                       macro_rules! failed_payment {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+                                                                                                       {
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
                                                                                                        }
                                                                                                }
                                                                                        }
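The `failure_handler!` macro's boolean flag is what distinguishes the two destinations: `fail_forward!` reports "next hop unknown" while `failed_payment!` reports a failure for which we were the destination. Written out as a plain function over simplified stand-in types:

```rust
// Simplified stand-ins for illustration only.
struct PaymentHash([u8; 32]);

enum HTLCDestination {
    UnknownNextHop { requested_forward_scid: u64 },
    FailedPayment { payment_hash: PaymentHash },
}

fn destination_for_failure(
    next_hop_unknown: bool,
    short_chan_id: u64,
    payment_hash: PaymentHash,
) -> HTLCDestination {
    if next_hop_unknown {
        HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
    } else {
        HTLCDestination::FailedPayment { payment_hash }
    }
}
```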
                                                                                                                        // `update_fail_malformed_htlc`, meaning here we encrypt the error as
                                                                                                                        // if it came from us (the second-to-last hop) but contains the sha256
                                                                                                                        // of the onion.
-                                                                                                                       fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+                                                                                                                       failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
                                                                                                                },
                                                                                                                Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
-                                                                                                                       fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+                                                                                                                       failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
                                                                                                                },
                                                                                                        };
                                                                                                        match next_hop {
                                                                                                                onion_utils::Hop::Receive(hop_data) => {
                                                                                                                        match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
                                                                                                                                Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
-                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
+                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                        }
                                                                                                                },
                                                                                                                _ => panic!(),
                                                                                        }
                                                                                        let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                        failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::Reason { failure_code, data }
+                                                                                               HTLCFailReason::Reason { failure_code, data },
+                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                        ));
                                                                                        continue;
                                                                                },
                                                                };
  
                                                                macro_rules! fail_htlc {
-                                                                       ($htlc: expr) => {
+                                                                       ($htlc: expr, $payment_hash: expr) => {
                                                                                let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
                                                                                htlc_msat_height_data.extend_from_slice(
                                                                                        &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                                                phantom_shared_secret,
                                                                                        }), payment_hash,
-                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+                                                                                       HTLCDestination::FailedPayment { payment_hash: $payment_hash },
                                                                                ));
                                                                        }
                                                                }
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                               fail_htlc!(claimable_htlc);
+                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                continue
                                                                                        }
                                                                                }
                                                                                if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
                                                                                                log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if total_value == $payment_data.total_msat {
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
                                                                                                let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
                                                                                                        Ok(payment_preimage) => payment_preimage,
                                                                                                        Err(()) => {
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                                continue
                                                                                                        }
                                                                                                };
                                                                                                        },
                                                                                                        hash_map::Entry::Occupied(_) => {
                                                                                                                log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
                                                                                        }
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
                                                                                if payment_data.is_none() {
                                                                                        log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                        continue
                                                                                };
                                                                                let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
                                                                                        let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
                                                                                        if payment_received_generated {
                        }
                }
  
-               for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
+               for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
                }
                self.forward_htlcs(&mut phantom_receives);
  
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = NotifyOption::SkipPersist;
  
 -                      let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 +                      let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
  
                        let mut handle_errors = Vec::new();
                        {
                        let mut should_persist = NotifyOption::SkipPersist;
                        if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
  
 -                      let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 +                      let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
  
                        let mut handle_errors = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
                        }
  
                        for htlc_source in timed_out_mpp_htlcs.drain(..) {
-                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+                               let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
+                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
                        }
  
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
                                                self.best_block.read().unwrap().height()));
                                self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
-                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
+                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+                                               HTLCDestination::FailedPayment { payment_hash: *payment_hash });
                        }
                }
        }
        // be surfaced to the user.
        fn fail_holding_cell_htlcs(
                &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
-               _counterparty_node_id: &PublicKey
+               counterparty_node_id: &PublicKey
        ) {
                for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
                        match htlc_src {
                                                        hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
                                                };
                                        let channel_state = self.channel_state.lock().unwrap();
-                                       self.fail_htlc_backwards_internal(channel_state,
-                                               htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
+                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
+                                       self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
                                },
                                HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
                                        let mut session_priv_bytes = [0; 32];
        /// to fail and take the channel_state lock for each iteration (as we take ownership and may
        /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
        /// still-available channels.
-       fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+       fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
                //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
                //identify whether we sent it or not based on the (I presume) very different runtime
                //between the branches here. We should make this async and move it into the forward HTLCs
                                        return;
                                }
                                mem::drop(channel_state_lock);
 -                              let retry = if let Some(payment_params_data) = payment_params {
 +                              let mut retry = if let Some(payment_params_data) = payment_params {
                                        let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
                                        Some(RouteParameters {
                                                payment_params: payment_params_data.clone(),
                                                        // TODO: If we decided to blame ourselves (or one of our channels) in
                                                        // process_onion_failure we should close that channel as it implies our
                                                        // next-hop is needlessly blaming us!
 +                                                      if let Some(scid) = short_channel_id {
 +                                                              retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
 +                                                      }
                                                        events::Event::PaymentPathFailed {
                                                                payment_id: Some(payment_id),
                                                                payment_hash: payment_hash.clone(),
                                                // ChannelDetails.
                                                // TODO: For non-temporary failures, we really should be closing the
                                                // channel here as we apparently can't relay through them anyway.
 +                                              let scid = path.first().unwrap().short_channel_id;
 +                                              retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
                                                events::Event::PaymentPathFailed {
                                                        payment_id: Some(payment_id),
                                                        payment_hash: payment_hash.clone(),
                                                        network_update: None,
                                                        all_paths_failed,
                                                        path: path.clone(),
 -                                                      short_channel_id: Some(path.first().unwrap().short_channel_id),
 +                                                      short_channel_id: Some(scid),
                                                        retry,
  #[cfg(test)]
                                                        error_code: Some(*failure_code),
                                pending_events.push(path_failure);
                                if let Some(ev) = full_failure_ev { pending_events.push(ev); }
                        },
-                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
+                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
                                let err_packet = match onion_error {
                                        HTLCFailReason::Reason { failure_code, data } => {
                                                log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
                                        }
                                }
                                mem::drop(channel_state_lock);
+                               let mut pending_events = self.pending_events.lock().unwrap();
                                if let Some(time) = forward_event {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
                                        pending_events.push(events::Event::PendingHTLCsForwardable {
                                                time_forwardable: time
                                        });
                                }
+                               pending_events.push(events::Event::HTLCHandlingFailed {
+                                       prev_channel_id: outpoint.to_channel_id(),
+                                       failed_next_destination: destination
+                               });
                        },
                }
        }
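
The HTLCHandlingFailed event pushed above is consumer-facing. Below is a minimal, hypothetical sketch (not part of the patch) of matching it from application code, using only the fields and variants visible in this diff; any further variants fall through the wildcard arms.

use lightning::util::events::{Event, HTLCDestination};

// Hedged sketch: classify the failure carried by the new event.
fn handle_event(event: &Event) {
	if let Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination, .. } = event {
		match failed_next_destination {
			HTLCDestination::NextHopChannel { node_id, channel_id, .. } => {
				// Forward failure: the HTLC arrived over `prev_channel_id`
				// and could not be relayed to `channel_id` / `node_id`.
				let _ = (prev_channel_id, node_id, channel_id);
			},
			HTLCDestination::FailedPayment { payment_hash, .. } => {
				// A payment destined for this node was failed back.
				let _ = payment_hash;
			},
			_ => {},
		}
	}
}
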
                                                        self.best_block.read().unwrap().height()));
                                        self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                                         HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
-                                                                        HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
+                                                                        HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+                                                                        HTLCDestination::FailedPayment { payment_hash } );
                                } else {
                                        match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
                                                ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
                let chan_restoration_res;
-               let (mut pending_failures, finalized_claims) = {
+               let (mut pending_failures, finalized_claims, counterparty_node_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
                                return;
                        }
  
+                       let counterparty_node_id = channel.get().get_counterparty_node_id();
                        let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
                        let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
                                // We only send a channel_update in the case where we are just now sending a
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
-                       (updates.failed_htlcs, updates.finalized_claimed_htlcs)
+                       (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.finalize_claims(finalized_claims);
                for failure in pending_failures.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
                }
        }
  
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
  
                let _ = handle_error!(self, result, *counterparty_node_id);
                                short_channel_id, channel_outpoint)) =>
                        {
                                for failure in pending_failures.drain(..) {
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
+                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
                                }
                                self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
                                self.finalize_claims(finalized_claim_htlcs);
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+               for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
                                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                                                }
                                        },
                                        MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
@@@ -5834,7 -5869,7 +5874,7 @@@ wher
                                                let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
                                                        failure_code, data,
-                                               }));
+                                               }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
                                        }
                                        if let Some(channel_ready) = channel_ready_opt {
                                                send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
                                                if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
                                                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
                                                        timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
                                                                failure_code: 0x4000 | 15,
                                                                data: htlc_msat_height_data
-                                                       }));
+                                                       }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
                                                        false
                                                } else { true }
                                        });
  
                self.handle_init_event_channel_failures(failed_channels);
  
-               for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+               for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
                }
        }
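
Each tuple drained above now carries an HTLCDestination describing where the failed HTLC was headed. A stand-alone sketch of the two shapes used throughout this patch (hypothetical helper; assumes the variants remain constructible outside the crate, with paths as imported in this diff):

use bitcoin::secp256k1::PublicKey;
use lightning::ln::PaymentHash;
use lightning::util::events::HTLCDestination;

// Hedged sketch: build the "failed forward" and "failed receive" tags.
fn destinations(counterparty: PublicKey, channel_id: [u8; 32], payment_hash: PaymentHash)
	-> (HTLCDestination, HTLCDestination)
{
	// A forward that could not reach the next hop over `channel_id`:
	let forward = HTLCDestination::NextHopChannel { node_id: Some(counterparty), channel_id };
	// An inbound payment failed back (e.g. timeout or underpayment):
	let receive = HTLCDestination::FailedPayment { payment_hash };
	(forward, receive)
}
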
  
@@@ -7286,7 -7322,9 +7327,9 @@@ impl<'a, Signer: Sign, M: Deref, T: Der
                };
  
                for htlc_source in failed_htlcs.drain(..) {
-                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
  
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@@ -7311,7 -7349,7 +7354,7 @@@ mod tests 
        use ln::msgs::ChannelMessageHandler;
        use routing::router::{PaymentParameters, RouteParameters, find_route};
        use util::errors::APIError;
-       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+       use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use util::test_utils;
        use chain::keysinterface::KeysInterface;
  
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               // We have to forward pending HTLCs twice - once to attempt the forward
+               // (which fails), and a second time to process the resulting failure and fail the HTLC backward
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
index cff632e4bc86b6a9e2a9f36b9b9787c0d3d6f8e7,38a593141f4fea7d974635c5c30a0555ecf07828..54d199a26f83f85cec4567baa1b1fae45d9e6faa
@@@ -24,7 -24,7 +24,7 @@@ use util::enforcing_trait_impls::Enforc
  use util::scid_utils;
  use util::test_utils;
  use util::test_utils::{panicking, TestChainMonitor};
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+ use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
  use util::errors::APIError;
  use util::config::UserConfig;
  use util::ser::{ReadableArgs, Writeable};
@@@ -46,6 -46,7 +46,7 @@@ use core::cell::RefCell
  use alloc::rc::Rc;
  use sync::{Arc, Mutex};
  use core::mem;
+ use core::iter::repeat;
  
  pub const CHAN_CONFIRM_DEPTH: u32 = 10;
  
@@@ -352,11 -353,6 +353,11 @@@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 
                                }
                        }
  
 +                      let broadcaster = test_utils::TestBroadcaster {
 +                              txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
 +                              blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
 +                      };
 +
                        // Before using all the new monitors to check the watch outpoints, use the full set of
                        // them to ensure we can write and reload our ChannelManager.
                        {
                                        keys_manager: self.keys_manager,
                                        fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
                                        chain_monitor: self.chain_monitor,
 -                                      tx_broadcaster: &test_utils::TestBroadcaster {
 -                                              txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
 -                                              blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
 -                                      },
 +                                      tx_broadcaster: &broadcaster,
                                        logger: &self.logger,
                                        channel_monitors,
                                }).unwrap();
                        }
  
                        let persister = test_utils::TestPersister::new();
 -                      let broadcaster = test_utils::TestBroadcaster {
 -                              txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
 -                              blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
 -                      };
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
@@@ -1185,7 -1188,7 +1186,7 @@@ macro_rules! commitment_signed_dance 
                {
                        commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
                        if $fail_backwards {
-                               $crate::expect_pending_htlcs_forwardable!($node_a);
+                               expect_pending_htlcs_forwardable_and_htlc_handling_failed!($node_a, vec![$crate::util::events::HTLCDestination::NextHopChannel{ node_id: Some($node_b.node.get_our_node_id()), channel_id: $commitment_signed.channel_id }]);
                                check_added_monitors!($node_a, 1);
  
                                let channel_state = $node_a.node.channel_state.lock().unwrap();
@@@ -1253,23 -1256,72 +1254,72 @@@ macro_rules! get_route_and_payment_has
  }
  
  #[macro_export]
- /// Clears (and ignores) a PendingHTLCsForwardable event
- macro_rules! expect_pending_htlcs_forwardable_ignore {
-       ($node: expr) => {{
+ macro_rules! expect_pending_htlcs_forwardable_conditions {
+       ($node: expr, $expected_failures: expr) => {{
+               let expected_failures = $expected_failures;
                let events = $node.node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 1);
                match events[0] {
                        $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
+               let count = expected_failures.len() + 1;
+               assert_eq!(events.len(), count);
+               if expected_failures.len() > 0 {
+                       expect_htlc_handling_failed_destinations!(events, expected_failures)
+               }
        }}
  }
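
The count logic above expects exactly one PendingHTLCsForwardable event plus one HTLCHandlingFailed event per entry in $expected_failures, with membership (not order) checked against the expected set. A tiny stand-alone model of that contract, using plain Rust stand-ins rather than LDK types:

enum Ev { Forwardable, HandlingFailed(&'static str) }

// Mirrors the macro's checks: first event is Forwardable, total count is
// failures + 1, and every failure destination appears in the expected set.
fn check(events: &[Ev], expected: &[&'static str]) {
	assert!(matches!(events[0], Ev::Forwardable));
	assert_eq!(events.len(), expected.len() + 1);
	for ev in &events[1..] {
		if let Ev::HandlingFailed(dest) = ev {
			assert!(expected.contains(dest));
		}
	}
}

fn main() {
	// Order of the failure events does not matter, only membership.
	check(&[Ev::Forwardable, Ev::HandlingFailed("chan_b"), Ev::HandlingFailed("chan_a")],
		&["chan_a", "chan_b"]);
}
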
  
+ #[macro_export]
+ macro_rules! expect_htlc_handling_failed_destinations {
+       ($events: expr, $expected_failures: expr) => {{
+               for event in $events {
+                       match event {
+                               $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
+                               $crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => {
+                                       assert!($expected_failures.contains(&failed_next_destination))
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+               }
+       }}
+ }
+ #[macro_export]
+ /// Clears (and ignores) a PendingHTLCsForwardable event
+ macro_rules! expect_pending_htlcs_forwardable_ignore {
+       ($node: expr) => {{
+               expect_pending_htlcs_forwardable_conditions!($node, vec![]);
+       }};
+ }
+ #[macro_export]
+ /// Clears (and ignores) PendingHTLCsForwardable and HTLCHandlingFailed events
+ macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore {
+       ($node: expr, $expected_failures: expr) => {{
+               expect_pending_htlcs_forwardable_conditions!($node, $expected_failures);
+       }};
+ }
  #[macro_export]
  /// Handles a PendingHTLCsForwardable event
  macro_rules! expect_pending_htlcs_forwardable {
        ($node: expr) => {{
-               $crate::expect_pending_htlcs_forwardable_ignore!($node);
+               expect_pending_htlcs_forwardable_ignore!($node);
+               $node.node.process_pending_htlc_forwards();
+               // Ensure process_pending_htlc_forwards is idempotent.
+               $node.node.process_pending_htlc_forwards();
+       }};
+ }
+ #[macro_export]
+ /// Handles a PendingHTLCsForwardable and HTLCHandlingFailed event
+ macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed {
+       ($node: expr, $expected_failures: expr) => {{
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $expected_failures);
                $node.node.process_pending_htlc_forwards();
  
                // Ensure process_pending_htlc_forwards is idempotent.
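
A typical call site for the new macro pair, mirroring the usages later in this diff - a hypothetical test fragment, with node and channel setup elided (so not runnable on its own):

// Fail an HTLC back at nodes[1], then expect the forwardable event plus
// the matching destination, and drive (idempotent) forward processing.
nodes[1].node.fail_htlc_backwards(&payment_hash);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
	nodes[1],
	vec![HTLCDestination::FailedPayment { payment_hash }]
);
check_added_monitors!(nodes[1], 1);
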
@@@ -1490,7 -1542,7 +1540,7 @@@ pub fn expect_payment_failed_conditions
        let mut events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        let expected_payment_id = match events.pop().unwrap() {
 -              Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update,
 +              Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update, short_channel_id,
                        #[cfg(test)]
                        error_code,
                        #[cfg(test)]
                        assert!(retry.is_some(), "expected retry.is_some()");
                        assert_eq!(retry.as_ref().unwrap().final_value_msat, path.last().unwrap().fee_msat, "Retry amount should match last hop in path");
                        assert_eq!(retry.as_ref().unwrap().payment_params.payee_pubkey, path.last().unwrap().pubkey, "Retry payee node_id should match last hop in path");
 +                      if let Some(scid) = short_channel_id {
 +                              assert!(retry.as_ref().unwrap().payment_params.previously_failed_channels.contains(&scid));
 +                      }
  
                        #[cfg(test)]
                        {
@@@ -1814,7 -1863,8 +1864,8 @@@ pub fn fail_payment_along_route<'a, 'b
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash);
-       expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
+       let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect();
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations);
  
        pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash);
  }
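
With the change above, the helper asserts one FailedPayment destination per expected path. A hypothetical two-hop invocation (test harness setup elided, following this file's conventions):

// Route a payment 0 -> 1 -> 2, then fail it back along the same path;
// the helper now also checks the per-path HTLCHandlingFailed events.
let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_hash);
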
@@@ -1855,7 -1905,7 +1906,7 @@@ pub fn pass_failed_payment_back<'a, 'b
                                node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
                                commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node);
                                if !update_next_node {
-                                       expect_pending_htlcs_forwardable!(node);
+                                       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]);
                                }
                        }
                        let events = node.node.get_and_clear_pending_msg_events();
index 7572abff8313e90cdd149483a3ec5e919c864398,8e7b3e8fbbd02b8a0697ca57cf29de87ab87100c..be517ea4a7698de94cd9fca02f7b57157a17e99f
@@@ -28,10 -28,10 +28,10 @@@ use routing::gossip::NetworkGraph
  use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
  use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
  use ln::msgs;
 -use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
 +use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::{byte_utils, test_utils};
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
  use util::errors::APIError;
  use util::ser::{Writeable, ReadableArgs};
  use util::config::UserConfig;
@@@ -54,6 -54,7 +54,7 @@@ use io
  use prelude::*;
  use alloc::collections::BTreeSet;
  use core::default::Default;
+ use core::iter::repeat;
  use sync::{Arc, Mutex};
  
  use ln::functional_test_utils::*;
@@@ -1059,6 -1060,26 +1060,6 @@@ fn fake_network_test() 
        fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
        claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
  
 -      // Add a duplicate new channel from 2 to 4
 -      let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
 -
 -      // Send some payments across both channels
 -      let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
 -      let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
 -      let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
 -
 -
 -      route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
 -      let events = nodes[0].node.get_and_clear_pending_msg_events();
 -      assert_eq!(events.len(), 0);
 -      nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
 -
 -      //TODO: Test that routes work again here as we've been notified that the channel is full
 -
 -      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
 -      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
 -      claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
 -
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
        check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
        check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
 -      close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 -      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
  }
  
  #[test]
@@@ -1127,7 -1151,7 +1128,7 @@@ fn holding_cell_htlc_counting() 
        // We have to forward pending HTLCs twice - once to attempt the forward (which
        // fails), and a second time to process the resulting failure and fail the HTLC backward.
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
  
        let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@@ -2593,7 -2617,7 +2594,7 @@@ fn claim_htlc_outputs_single_tx() 
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let mut events = nodes[0].node.get_and_clear_pending_events();
                expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
-               match events[1] {
+               match events.last().unwrap() {
                        Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                        _ => panic!("Unexpected event"),
                }
@@@ -2905,7 -2929,7 +2906,7 @@@ fn do_test_htlc_on_chain_timeout(connec
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.fail_htlc_backwards(&payment_hash);
        check_added_monitors!(nodes[2], 0);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
        check_added_monitors!(nodes[2], 1);
  
        let events = nodes[2].node.get_and_clear_pending_msg_events();
                }
        }
  
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@@ -3045,7 -3069,7 +3046,7 @@@ fn test_simple_commitment_revoked_fail_
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
  
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@@ -3108,7 -3132,7 +3109,7 @@@ fn do_test_commitment_revoked_fail_back
        let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
  
        nodes[2].node.fail_htlc_backwards(&first_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        // Drop the last RAA from 3 -> 2
  
        nodes[2].node.fail_htlc_backwards(&second_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        check_added_monitors!(nodes[2], 1);
  
        nodes[2].node.fail_htlc_backwards(&third_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
                // commitment transaction for nodes[0] until process_pending_htlc_forwards().
                check_added_monitors!(nodes[1], 1);
                let events = nodes[1].node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 1);
+               assert_eq!(events.len(), 2);
                match events[0] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
+               match events[1] {
+                       Event::HTLCHandlingFailed { .. } => { },
+                       _ => panic!("Unexpected event"),
+               }
                // Deliberately don't process the pending fail-back so they all fail back at once after
                // block connection just like the !deliver_bs_raa case
        }
        assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
  
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() });
        match events[0] {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
                _ => panic!("Unexepected event"),
@@@ -3395,7 -3423,7 +3400,7 @@@ fn test_htlc_ignore_latest_remote_commi
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
 -      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 3);
        assert_eq!(node_txn[0], node_txn[1]);
  
@@@ -4262,7 -4290,7 +4267,7 @@@ fn do_test_htlc_timeout(send_partial_mp
                connect_block(&nodes[1], &block);
        }
  
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
  
        check_added_monitors!(nodes[1], 1);
        let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@@ -4326,7 -4354,7 +4331,7 @@@ fn do_test_holding_cell_htlc_add_timeou
        connect_blocks(&nodes[1], 1);
  
        if forwarded_htlc {
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
                check_added_monitors!(nodes[1], 1);
                let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(fail_commit.len(), 1);
@@@ -4813,7 -4841,7 +4818,7 @@@ fn test_claim_on_remote_sizeable_push_m
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
 -      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
        assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening
@@@ -5019,7 -5047,7 +5024,7 @@@ fn test_static_spendable_outputs_justic
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
  
 -      let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(revoked_htlc_txn.len(), 2);
        check_spends!(revoked_htlc_txn[0], chan_1.3);
        assert_eq!(revoked_htlc_txn[1].input.len(), 1);
@@@ -5374,7 -5402,7 +5379,7 @@@ fn test_duplicate_payment_hash_one_fail
  
        mine_transaction(&nodes[1], &htlc_timeout_tx);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@@ -5494,18 -5522,18 +5499,18 @@@ fn do_test_fail_backwards_unrevoked_rem
                &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
  
-       create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
-       let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
+       let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+       let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+       let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
+       let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
+       let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
  
        // Rebalance and check output sanity...
        send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
        send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
  
-       let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+       let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
        // 0th HTLC:
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
        // Double-check that six of the new HTLC were added
        // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
        // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
  
        // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
        // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
        nodes[4].node.fail_htlc_backwards(&payment_hash_5);
        nodes[4].node.fail_htlc_backwards(&payment_hash_6);
        check_added_monitors!(nodes[4], 0);
-       expect_pending_htlcs_forwardable!(nodes[4]);
+       let failed_destinations = vec![
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
        check_added_monitors!(nodes[4], 1);
  
        let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
        nodes[5].node.fail_htlc_backwards(&payment_hash_2);
        nodes[5].node.fail_htlc_backwards(&payment_hash_4);
        check_added_monitors!(nodes[5], 0);
-       expect_pending_htlcs_forwardable!(nodes[5]);
+       let failed_destinations_2 = vec![
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
        check_added_monitors!(nodes[5], 1);
  
        let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
        nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
        commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
  
-       let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
-       expect_pending_htlcs_forwardable!(nodes[3]);
+       let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
+       // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 HTLCHandlingFailed events
+       let failed_destinations_3 = vec![
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
        check_added_monitors!(nodes[3], 1);
        let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
        //
        // Alternatively, we may broadcast the previous commitment transaction, which should only
        // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
-       let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
+       let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
  
        if announce_latest {
                mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
        }
        let events = nodes[2].node.get_and_clear_pending_events();
        let close_event = if deliver_last_raa {
-               assert_eq!(events.len(), 2);
-               events[1].clone()
+               assert_eq!(events.len(), 2 + 6);
+               events.last().clone().unwrap()
        } else {
                assert_eq!(events.len(), 1);
-               events[0].clone()
+               events.last().clone().unwrap()
        };
        match close_event {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
        check_closed_broadcast!(nodes[2], true);
        if deliver_last_raa {
                expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+               let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
+               expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
        } else {
-               expect_pending_htlcs_forwardable!(nodes[2]);
+               let expected_destinations: Vec<HTLCDestination> = if announce_latest {
+                       repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
+               } else {
+                       repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
+               };
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
        }
        check_added_monitors!(nodes[2], 3);
  
@@@ -5989,7 -6047,7 +6024,7 @@@ fn do_htlc_claim_previous_remote_commit
        let htlc_value = if use_dust { 50000 } else { 3000000 };
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
        nodes[1].node.fail_htlc_backwards(&our_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        check_added_monitors!(nodes[1], 1);
  
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@@ -6449,7 -6507,7 +6484,7 @@@ fn test_fail_holding_cell_htlc_upon_fre
  
        // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
        let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(process_htlc_forwards_event.len(), 1);
+       assert_eq!(process_htlc_forwards_event.len(), 2);
        match &process_htlc_forwards_event[0] {
                &Event::PendingHTLCsForwardable { .. } => {},
                _ => panic!("Unexpected event"),
@@@ -7074,7 -7132,7 +7109,7 @@@ fn test_update_fulfill_htlc_bolt2_after
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+       let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
  
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
  
  
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_4.len(), 1);
  
@@@ -7166,7 -7224,7 +7201,7 @@@ fn do_test_failure_delay_dust_htlc_loca
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
        nodes[1].node.fail_htlc_backwards(&payment_hash_2);
        check_added_monitors!(nodes[1], 0);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
        check_added_monitors!(nodes[1], 1);
  
        let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@@ -7567,7 -7625,7 +7602,7 @@@ fn test_check_htlc_underpaying() 
        // Note that we first have to wait a random delay before processing the receipt of the HTLC,
        // and then will wait a second random delay before failing the HTLC back:
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
  
        // Node 3 is expecting payment of 100_000 but received 10_000,
        // it should fail htlc like we didn't know the preimage.
@@@ -7824,7 -7882,7 +7859,7 @@@ fn test_bump_penalty_txn_on_revoked_htl
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
  
 -      let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +      let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(revoked_htlc_txn.len(), 3);
        check_spends!(revoked_htlc_txn[1], chan.3);
  
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
        let events = nodes[0].node.get_and_clear_pending_events();
        expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
-       match events[1] {
+       match events.last().unwrap() {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                _ => panic!("Unexpected event"),
        }
@@@ -8085,26 -8143,22 +8120,26 @@@ fn test_counterparty_raa_skip_no_crash(
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
  
 -      let mut guard = nodes[0].node.channel_state.lock().unwrap();
 -      let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
 +      let per_commitment_secret;
 +      let next_per_commitment_point;
 +      {
 +              let mut guard = nodes[0].node.channel_state.lock().unwrap();
 +              let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
  
 -      const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 +              const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
  
 -      // Make signer believe we got a counterparty signature, so that it allows the revocation
 -      keys.get_enforcement_state().last_holder_commitment -= 1;
 -      let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
 +              // Make signer believe we got a counterparty signature, so that it allows the revocation
 +              keys.get_enforcement_state().last_holder_commitment -= 1;
 +              per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
  
 -      // Must revoke without gaps
 -      keys.get_enforcement_state().last_holder_commitment -= 1;
 -      keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
 +              // Must revoke without gaps
 +              keys.get_enforcement_state().last_holder_commitment -= 1;
 +              keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
  
 -      keys.get_enforcement_state().last_holder_commitment -= 1;
 -      let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
 -              &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
 +              keys.get_enforcement_state().last_holder_commitment -= 1;
 +              next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
 +                      &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
 +      }
  
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
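
The block introduced above narrows the lifetime of the `channel_state` `MutexGuard`: the secrets are computed inside the braces, the guard drops at the closing brace, and only then does the test call back into a node. The same shape appears again below for `accept_chan_msg` in `test_reject_funding_before_inbound_channel_accepted`. A minimal sketch of the idiom, assuming nothing about LDK itself:

    use std::sync::Mutex;

    struct Node { state: Mutex<u64> }

    impl Node {
        // Re-entrant entry point that takes the same lock internally.
        fn handle_message(&self, msg: u64) {
            let mut st = self.state.lock().unwrap();
            *st += msg;
        }
    }

    fn main() {
        let node = Node { state: Mutex::new(1) };

        // Compute everything that needs the lock inside a block, so the
        // MutexGuard drops at the closing brace...
        let msg = {
            let st = node.state.lock().unwrap();
            *st * 2
        };

        // ...and only then call back into the node. Re-locking a
        // std::sync::Mutex while the guard above was still alive would
        // deadlock or panic.
        node.handle_message(msg);
        assert_eq!(*node.state.lock().unwrap(), 3);
    }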
@@@ -8125,19 -8179,19 +8160,19 @@@ fn test_bump_txn_sanitize_tracking_maps
  
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
        // Lock HTLC in both directions
-       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
-       route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
+       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+       let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
  
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
  
        // Revoke local commitment tx
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
  
        // Broadcast set of revoked txn on A
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
  
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@@ -8312,19 -8366,19 +8347,19 @@@ fn test_channel_update_has_correct_htlc
  
        // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
        // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
 -      assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
 +      assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
        // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
        // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
 -      assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
 +      assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
  
        // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
        // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
        // `channel_value`.
 -      assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
 +      assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
        // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
        // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
        // `channel_value`.
 -      assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
 +      assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
  }
  
  #[test]
@@@ -8446,12 -8500,12 +8481,12 @@@ fn test_reject_funding_before_inbound_c
        // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
        // `handle_accept_channel`, which is required in order for `create_funding_transaction` to
        // succeed when `nodes[0]` is passed to it.
 -      {
 +      let accept_chan_msg = {
                let mut lock;
                let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id);
 -              let accept_chan_msg = channel.get_accept_channel_message();
 -              nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
 -      }
 +              channel.get_accept_channel_message()
 +      };
 +      nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
  
        let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
  
@@@ -8690,7 -8744,7 +8725,7 @@@ fn test_bad_secret_hash() 
        // All the below cases should end up being handled exactly identically, so we macro the
        // resulting events.
        macro_rules! handle_unknown_invalid_payment_data {
-               () => {
+               ($payment_hash: expr) => {
                        check_added_monitors!(nodes[0], 1);
                        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                        let payment_event = SendEvent::from_event(events.pop().unwrap());
                        // We have to forward pending HTLCs once to process the receipt of the HTLC and then
                        // again to process the pending backwards-failure of the HTLC
                        expect_pending_htlcs_forwardable!(nodes[1]);
-                       expect_pending_htlcs_forwardable!(nodes[1]);
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
                        check_added_monitors!(nodes[1], 1);
  
                        // We should fail the payment back
  
        // Send a payment with the right payment hash but the wrong payment secret
        nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(our_payment_hash);
        expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
  
        // Send a payment with a random payment hash, but the right payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(random_payment_hash);
        expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
  
        // Send a payment with a random payment hash and random payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(random_payment_hash);
        expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
  }
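
The macro change in this test is the usual way to thread a call-site value through a declarative test macro: `handle_unknown_invalid_payment_data!` previously took no arguments and so could not name the hash it expected to fail; now each invocation passes it as an `expr`. A compilable miniature of the same pattern (names here are illustrative, not LDK's):

    // Miniature of the handle_unknown_invalid_payment_data! change: the macro
    // takes the expected hash as an expression instead of capturing nothing.
    macro_rules! expect_failed_with_hash {
        ($events: expr, $payment_hash: expr) => {{
            // Each invocation supplies its own hash, so the same macro body
            // can assert against our_payment_hash, random_payment_hash, etc.
            assert!($events.iter().any(|h| *h == $payment_hash));
        }};
    }

    fn main() {
        let our_payment_hash = [1u8; 32];
        let random_payment_hash = [2u8; 32];
        let failed: Vec<[u8; 32]> = vec![our_payment_hash, random_payment_hash];

        expect_failed_with_hash!(failed, our_payment_hash);
        expect_failed_with_hash!(failed, random_payment_hash);
    }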
  
@@@ -9557,7 -9611,7 +9592,7 @@@ fn do_test_tx_confirmed_skipping_blocks
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
                        &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
                check_added_monitors!(nodes[1], 1);
  
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@@ -9740,7 -9794,11 +9775,11 @@@ fn do_test_dup_htlc_second_rejected(tes
                // Now we go fail back the first HTLC from the user end.
                nodes[1].node.fail_htlc_backwards(&our_payment_hash);
  
-               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               let expected_destinations = vec![
+                       HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+                       HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+               ];
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
                nodes[1].node.process_pending_htlc_forwards();
  
                check_added_monitors!(nodes[1], 1);
                if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
        } else {
                // Let the second HTLC fail and claim the first
-               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
                nodes[1].node.process_pending_htlc_forwards();
  
                check_added_monitors!(nodes[1], 1);
@@@ -9799,7 -9857,7 +9838,7 @@@ fn test_inconsistent_mpp_params() 
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
  
        let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
        }
        expect_pending_htlcs_forwardable_ignore!(nodes[3]);
        nodes[3].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        nodes[3].node.process_pending_htlc_forwards();
  
        check_added_monitors!(nodes[3], 1);
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
  
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
        check_added_monitors!(nodes[2], 1);
  
        let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
@@@ -9983,7 -10041,11 +10022,11 @@@ fn test_double_partial_claim() 
        connect_blocks(&nodes[3], TEST_FINAL_CLTV);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
  
-       expect_pending_htlcs_forwardable!(nodes[3]);
+       let failed_destinations = vec![
+               HTLCDestination::FailedPayment { payment_hash },
+               HTLCDestination::FailedPayment { payment_hash },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
  
        pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
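
Taken together, the functional-test changes above pin down the new event's surface from the outside: a forward that cannot proceed reports `NextHopChannel { node_id, channel_id }`, while a locally rejected receive reports `FailedPayment { payment_hash }`. A sketch of how a downstream event handler might branch on it, again with stand-in types since the exact field names and the enum's full variant list are not shown in this diff:

    // Stand-ins mirroring the two variants exercised above; the real enum
    // (util::events::HTLCDestination) may carry additional variants, and the
    // event's field name is assumed.
    enum HTLCDestination {
        NextHopChannel { node_id: Option<[u8; 33]>, channel_id: [u8; 32] },
        FailedPayment { payment_hash: [u8; 32] },
    }

    enum Event {
        HTLCHandlingFailed { failed_next_destination: HTLCDestination },
    }

    fn handle_event(event: Event) {
        match event {
            Event::HTLCHandlingFailed { failed_next_destination } => match failed_next_destination {
                HTLCDestination::NextHopChannel { node_id, channel_id } => {
                    // A forward failed: one input for per-channel routing metrics.
                    println!("forward failed toward {:?} over channel {:?}", node_id, &channel_id[..4]);
                }
                HTLCDestination::FailedPayment { payment_hash } => {
                    // An inbound payment was failed back (bad secret, underpayment, ...).
                    println!("rejected inbound payment {:?}", &payment_hash[..4]);
                }
            },
        }
    }

    fn main() {
        handle_event(Event::HTLCHandlingFailed {
            failed_next_destination: HTLCDestination::FailedPayment { payment_hash: [0u8; 32] },
        });
        handle_event(Event::HTLCHandlingFailed {
            failed_next_destination: HTLCDestination::NextHopChannel { node_id: None, channel_id: [1u8; 32] },
        });
    }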
  
index ed1516efcb01f2c7955bd1285e476158225d58f8,b9d86fe3a6f9785868b3d906852efa0a443a10f3..e2c6432337fcadf0e5a5a498132681f26a7f997f
@@@ -21,9 -21,9 +21,9 @@@ use routing::gossip::{NetworkUpdate, Ro
  use routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop};
  use ln::features::{InitFeatures, InvoiceFeatures, NodeFeatures};
  use ln::msgs;
 -use ln::msgs::{ChannelMessageHandler, ChannelUpdate, OptionalField};
 +use ln::msgs::{ChannelMessageHandler, ChannelUpdate};
  use ln::wire::Encode;
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
  use util::ser::{ReadableArgs, Writeable, Writer};
  use util::{byte_utils, test_utils};
  use util::config::{UserConfig, ChannelConfig};
@@@ -126,7 -126,7 +126,7 @@@ fn run_onion_failure_test_with_fail_int
                                expect_htlc_forward!(&nodes[2]);
                                expect_event!(&nodes[2], Event::PaymentReceived);
                                callback_node();
-                               expect_pending_htlcs_forwardable!(nodes[2]);
+                               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
                        }
  
                        let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@@ -227,7 -227,7 +227,7 @@@ impl msgs::ChannelUpdate 
                                flags: 0,
                                cltv_expiry_delta: 0,
                                htlc_minimum_msat: 0,
 -                              htlc_maximum_msat: OptionalField::Absent,
 +                              htlc_maximum_msat: msgs::MAX_VALUE_MSAT,
                                fee_base_msat: 0,
                                fee_proportional_millionths: 0,
                                excess_data: vec![],
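
This hunk (and its twins in priv_short_conf_tests.rs and test_utils.rs below) reflects `htlc_maximum_msat` becoming a plain `u64` on channel updates, with `msgs::MAX_VALUE_MSAT` standing in for the old `OptionalField::Absent`. Code still holding the optional form can migrate with a one-line mapping; a sketch with a stand-in constant (the real `MAX_VALUE_MSAT` is defined in `ln::msgs`):

    // Stand-in for lightning::ln::msgs::MAX_VALUE_MSAT; the real constant caps
    // values at 21 million BTC expressed in millisatoshis.
    const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;

    // Old-style optional field, as OptionalField<u64> used to model it.
    enum OptionalField<T> {
        Present(T),
        Absent,
    }

    // Migration shim: Absent becomes the "no limit" sentinel.
    fn to_plain_htlc_maximum_msat(field: OptionalField<u64>) -> u64 {
        match field {
            OptionalField::Present(v) => v,
            OptionalField::Absent => MAX_VALUE_MSAT,
        }
    }

    fn main() {
        assert_eq!(to_plain_htlc_maximum_msat(OptionalField::Present(1_000_000)), 1_000_000);
        assert_eq!(to_plain_htlc_maximum_msat(OptionalField::Absent), MAX_VALUE_MSAT);
    }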
@@@ -1036,7 -1036,7 +1036,7 @@@ fn test_phantom_onion_hmac_failure() 
        };
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@@ -1108,7 -1108,7 +1108,7 @@@ fn test_phantom_invalid_onion_payload(
        }
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@@ -1164,7 -1164,7 +1164,7 @@@ fn test_phantom_final_incorrect_cltv_ex
        }
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@@ -1210,7 -1210,7 +1210,7 @@@ fn test_phantom_failure_too_low_cltv() 
  
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@@ -1255,7 -1255,7 +1255,7 @@@ fn test_phantom_failure_too_low_recv_am
        nodes[1].node.process_pending_htlc_forwards();
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
        nodes[1].node.process_pending_htlc_forwards();
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(&nodes[1], 1);
@@@ -1352,7 -1352,7 +1352,7 @@@ fn test_phantom_failure_reject_payment(
        nodes[1].node.process_pending_htlc_forwards();
        expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat);
        nodes[1].node.fail_htlc_backwards(&payment_hash);
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        nodes[1].node.process_pending_htlc_forwards();
  
        let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
index d5dd51cac4e86b83b77a38d22874a8915e9c398d,5a02d15a9d1e7534caacdaec66aa36c080554c77..ed0311945e6de9d491986906f5be72c0cacb07ae
@@@ -19,10 -19,10 +19,10 @@@ use routing::gossip::RoutingFees
  use routing::router::{PaymentParameters, RouteHint, RouteHintHop};
  use ln::features::{InitFeatures, InvoiceFeatures, ChannelTypeFeatures};
  use ln::msgs;
 -use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate, ErrorAction};
 +use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
  use ln::wire::Encode;
  use util::enforcing_trait_impls::EnforcingSigner;
- use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
+ use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
  use util::config::UserConfig;
  use util::ser::{Writeable, ReadableArgs};
  use util::test_utils;
@@@ -478,7 -478,7 +478,7 @@@ fn test_scid_alias_returned() 
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
  
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0, InitFeatures::known(), InitFeatures::known());
-       create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known());
+       let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known());
  
        let last_hop = nodes[2].node.list_usable_channels();
        let mut hop_hints = vec![RouteHint(vec![RouteHintHop {
        commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true);
  
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]);
        check_added_monitors!(nodes[1], 1);
  
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                flags: 1,
                cltv_expiry_delta: accept_forward_cfg.channel_config.cltv_expiry_delta,
                htlc_minimum_msat: 1_000,
 -              htlc_maximum_msat: OptionalField::Present(1_000_000), // Defaults to 10% of the channel value
 +              htlc_maximum_msat: 1_000_000, // Defaults to 10% of the channel value
                fee_base_msat: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_base_msat,
                fee_proportional_millionths: last_hop[0].counterparty.forwarding_info.as_ref().unwrap().fee_proportional_millionths,
                excess_data: Vec::new(),
index 561fa8104694985f321c692a4cf949c7ef6ddddb,f97bdb1bd95943395f15a33b114fa8a60bad1d77..fab6962e514957209b832c786916575be95fed4c
@@@ -16,7 -16,7 +16,7 @@@ use ln::channelmanager::{ChannelManager
  use ln::features::InitFeatures;
  use ln::msgs::ChannelMessageHandler;
  use util::enforcing_trait_impls::EnforcingSigner;
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
  use util::test_utils;
  use util::ser::{ReadableArgs, Writeable};
  
@@@ -82,7 -82,7 +82,7 @@@ fn do_test_onchain_htlc_reorg(local_com
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
                check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 -              let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
 +              let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
                check_spends!(node_2_commitment_txn[1], chan_2.3);
                        txdata: vec![],
                };
                connect_block(&nodes[1], &block);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        }
  
        check_added_monitors!(nodes[1], 1);
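
The `split_off(0)` changes in this file and in `test_bump_penalty_txn_on_revoked_htlcs` above swap "hold the `MutexGuard` and read through it" for "drain the broadcast buffer into an owned `Vec`": the `tx_broadcaster` lock is released at the end of the statement and the buffer is left empty for later assertions. The idiom in isolation:

    use std::sync::Mutex;

    fn main() {
        // Models tx_broadcaster.txn_broadcasted: a shared buffer of broadcasts.
        let txn_broadcasted: Mutex<Vec<&'static str>> =
            Mutex::new(vec!["commitment", "htlc_timeout"]);

        // split_off(0) moves every element out while the guard is held; the
        // temporary guard then drops at the end of the statement, so we own
        // the data and the lock is free for whoever broadcasts next.
        let drained = txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(drained.len(), 2);

        // The shared buffer is now empty, so later checks see only new broadcasts.
        assert!(txn_broadcasted.lock().unwrap().is_empty());
    }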
index c2d14073d990dd9c4a407c93c1a7fef87724f92d,fe99b06c9bcf46e24388c2a8447055bf635a4104..9306fb254f42f8c071d7b6880b22b00bb072fd2d
@@@ -19,6 -19,7 +19,6 @@@ use chain::transaction::OutPoint
  use chain::keysinterface;
  use ln::features::{ChannelFeatures, InitFeatures};
  use ln::{msgs, wire};
 -use ln::msgs::OptionalField;
  use ln::script::ShutdownScript;
  use routing::scoring::FixedPenaltyScorer;
  use util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
@@@ -168,7 -169,7 +168,7 @@@ impl<'a> chain::Watch<EnforcingSigner> 
                update_res
        }
  
-       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+       fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                return self.chain_monitor.release_pending_monitor_events();
        }
  }
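
Out-of-tree implementors of `chain::Watch` need the same mechanical update as the two test harnesses in this diff: `release_pending_monitor_events` now tags each batch of `MonitorEvent`s with an `Option<PublicKey>` for the counterparty node, and wrappers simply forward the richer tuple. A sketch of the shape with stand-in types:

    // Stand-ins for OutPoint, MonitorEvent and PublicKey, just to show the
    // shape of the new return type.
    struct OutPoint;
    struct MonitorEvent;
    struct PublicKey;

    trait Watch {
        // Each batch of events is now tagged with the (optional) counterparty
        // node id of the channel the events belong to.
        fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
    }

    // Exactly what the test harnesses in this diff do: delegate unchanged.
    struct PassThrough<W: Watch> { inner: W }

    impl<W: Watch> Watch for PassThrough<W> {
        fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
            self.inner.release_pending_monitor_events()
        }
    }

    struct Stub;

    impl Watch for Stub {
        fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
            vec![(OutPoint, vec![MonitorEvent], Some(PublicKey))]
        }
    }

    fn main() {
        let watch = PassThrough { inner: Stub };
        assert_eq!(watch.release_pending_monitor_events().len(), 1);
    }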
@@@ -406,7 -407,7 +406,7 @@@ fn get_dummy_channel_update(short_chan_
                        flags: 0,
                        cltv_expiry_delta: 0,
                        htlc_minimum_msat: 0,
 -                      htlc_maximum_msat: OptionalField::Absent,
 +                      htlc_maximum_msat: msgs::MAX_VALUE_MSAT,
                        fee_base_msat: 0,
                        fee_proportional_millionths: 0,
                        excess_data: vec![],