Merge pull request #1076 from TheBlueMatt/2021-09-forwardable-regen
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 29 Sep 2021 20:24:37 +0000 (20:24 +0000)
committer GitHub <noreply@github.com>
Wed, 29 Sep 2021 20:24:37 +0000 (20:24 +0000)
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/util/events.rs

index a4684bfe6cc2960438bfdc9e2e575ed2b7aa2f79,49a6bd0a427284c889f950e0fcde677706f2d72e..a0ffe263c84e492773bc8d79b2b9ccfe90910095
@@@ -43,6 -43,7 +43,6 @@@ use chain::transaction::{OutPoint, Tran
  // Since this struct is returned in `list_channels` methods, expose it here in case users want to
  // construct one themselves.
  use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 -pub use ln::channel::CounterpartyForwardingInfo;
  use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
  use ln::features::{InitFeatures, NodeFeatures};
  use routing::router::{Route, RouteHop};
@@@ -52,9 -53,9 +52,9 @@@ use ln::onion_utils
  use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
  use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
  use util::config::UserConfig;
 -use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 +use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
  use util::{byte_utils, events};
 -use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 +use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
  use util::chacha20::{ChaCha20, ChaChaReader};
  use util::logger::{Logger, Level};
  use util::errors::APIError;
@@@ -172,22 -173,6 +172,22 @@@ struct ClaimableHTLC 
        onion_payload: OnionPayload,
  }
  
 +/// A payment identifier used to correlate an MPP payment's per-path HTLC sources internally.
 +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 +pub(crate) struct MppId(pub [u8; 32]);
 +
 +impl Writeable for MppId {
 +      fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
 +              self.0.write(w)
 +      }
 +}
 +
 +impl Readable for MppId {
 +      fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
 +              let buf: [u8; 32] = Readable::read(r)?;
 +              Ok(MppId(buf))
 +      }
 +}
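
The `Writeable`/`Readable` pair above fixes `MppId` at exactly 32 raw bytes on the wire. A minimal round-trip sketch, assuming LDK's blanket `Writer` impl for `std::io::Write` types (so a plain `Vec<u8>` works as the sink):

    use std::io::Cursor;

    fn mpp_id_round_trip() {
        let id = MppId([42; 32]);
        let mut buf = Vec::new();
        id.write(&mut buf).unwrap();      // writes the 32 bytes with no length prefix
        assert_eq!(buf.len(), 32);
        let read_back: MppId = Readable::read(&mut Cursor::new(&buf)).unwrap();
        assert_eq!(id, read_back);
    }
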
  /// Tracks the inbound corresponding to an outbound HTLC
  #[derive(Clone, PartialEq)]
  pub(crate) enum HTLCSource {
                /// Technically we can recalculate this from the route, but we cache it here to avoid
                /// doing a double-pass on route when we get a failure back
                first_hop_htlc_msat: u64,
 +              mpp_id: MppId,
        },
  }
  #[cfg(test)]
@@@ -208,7 -192,6 +208,7 @@@ impl HTLCSource 
                        path: Vec::new(),
                        session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
                        first_hop_htlc_msat: 0,
 +                      mpp_id: MppId([2; 32]),
                }
        }
  }
@@@ -242,7 -225,6 +242,7 @@@ type ShutdownResult = (Option<(OutPoint
  
  struct MsgHandleErrInternal {
        err: msgs::LightningError,
 +      chan_id: Option<[u8; 32]>, // If Some, a channel of ours has been closed
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
  }
  impl MsgHandleErrInternal {
                                        },
                                },
                        },
 +                      chan_id: None,
                        shutdown_finish: None,
                }
        }
                                err,
                                action: msgs::ErrorAction::IgnoreError,
                        },
 +                      chan_id: None,
                        shutdown_finish: None,
                }
        }
        #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
 -              Self { err, shutdown_finish: None }
 +              Self { err, chan_id: None, shutdown_finish: None }
        }
        #[inline]
        fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                                        },
                                },
                        },
 +                      chan_id: Some(channel_id),
                        shutdown_finish: Some((shutdown_res, channel_update)),
                }
        }
                                        },
                                },
                        },
 +                      chan_id: None,
                        shutdown_finish: None,
                }
        }
@@@ -489,17 -467,14 +489,17 @@@ pub struct ChannelManager<Signer: Sign
        /// The session_priv bytes of outbound payments which are pending resolution.
        /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
        /// (if the channel has been force-closed), however we track them here to prevent duplicative
 -      /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
 +      /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
        /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
        /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
        /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
        /// after reloading from disk while replaying blocks against ChannelMonitors.
        ///
 +      /// Each payment's map entry holds the session_priv bytes of each of its MPP parts (even
 +      /// payments sent over a single path have an entry here).
 +      ///
        /// Locked *after* channel_state.
 -      pending_outbound_payments: Mutex<HashSet<[u8; 32]>>,
 +      pending_outbound_payments: Mutex<HashMap<MppId, HashSet<[u8; 32]>>>,
  
        our_network_key: SecretKey,
        our_network_pubkey: PublicKey,
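
A standalone sketch of the per-payment bookkeeping the new `HashMap` enables; plain byte arrays stand in for `MppId` and the session_priv bytes, so nothing below is LDK API:

    use std::collections::{HashMap, HashSet};

    fn track_mpp_parts() {
        let mut pending: HashMap<[u8; 32], HashSet<[u8; 32]>> = HashMap::new();
        let mpp_id = [1u8; 32];
        // Each path of one payment registers its session_priv under the shared id.
        pending.entry(mpp_id).or_insert_with(HashSet::new).insert([10; 32]);
        pending.entry(mpp_id).or_insert_with(HashSet::new).insert([11; 32]);
        // A failing path removes only its own session; the payment as a whole has
        // failed (all_paths_failed) only once the per-payment set drains empty.
        let sessions = pending.get_mut(&mpp_id).unwrap();
        sessions.remove(&[10u8; 32]);
        assert!(!sessions.is_empty()); // one path is still pending
    }
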
@@@ -651,19 -626,6 +651,19 @@@ const CHECK_CLTV_EXPIRY_SANITY: u32 = M
  #[allow(dead_code)]
  const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
  
 +/// Information needed for constructing an invoice route hint for this channel.
 +#[derive(Clone, Debug, PartialEq)]
 +pub struct CounterpartyForwardingInfo {
 +      /// Base routing fee in millisatoshis.
 +      pub fee_base_msat: u32,
 +      /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
 +      pub fee_proportional_millionths: u32,
 +      /// The minimum difference in cltv_expiry between an inbound HTLC and its outgoing counterpart,
 +      /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
 +      /// `cltv_expiry_delta` for more details.
 +      pub cltv_expiry_delta: u16,
 +}
 +
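
As a worked example of how these fields price a forward (the standard BOLT 7 fee formula; the helper name is ours, not an LDK API):

    fn forwarding_fee_msat(info: &CounterpartyForwardingInfo, amt_forwarded_msat: u64) -> u64 {
        // base fee, plus fee_proportional_millionths per 1,000,000 msat forwarded
        info.fee_base_msat as u64
            + amt_forwarded_msat * info.fee_proportional_millionths as u64 / 1_000_000
    }

Forwarding 50,000,000 msat through a hop advertising a 1,000 msat base fee and 100 ppm thus costs 1,000 + 5,000 = 6,000 msat.
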
  /// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
  /// to better separate parameters.
  #[derive(Clone, Debug, PartialEq)]
@@@ -818,13 -780,12 +818,13 @@@ macro_rules! handle_error 
        ($self: ident, $internal: expr, $counterparty_node_id: expr) => {
                match $internal {
                        Ok(msg) => Ok(msg),
 -                      Err(MsgHandleErrInternal { err, shutdown_finish }) => {
 +                      Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
                                #[cfg(debug_assertions)]
                                {
                                        // In testing, ensure there are no deadlocks where the lock is already held upon
                                        // entering the macro.
                                        assert!($self.channel_state.try_lock().is_ok());
 +                                      assert!($self.pending_events.try_lock().is_ok());
                                }
  
                                let mut msg_events = Vec::with_capacity(2);
                                                        msg: update
                                                });
                                        }
 +                                      if let Some(channel_id) = chan_id {
 +                                      $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } });
 +                                      }
                                }
  
                                log_error!($self.logger, "{}", err.err);
@@@ -1180,7 -1138,7 +1180,7 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                                pending_msg_events: Vec::new(),
                        }),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
 -                      pending_outbound_payments: Mutex::new(HashSet::new()),
 +                      pending_outbound_payments: Mutex::new(HashMap::new()),
  
                        our_network_key: keys_manager.get_node_secret(),
                        our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
                                                                msg: channel_update
                                                        });
                                                }
 +                                              if let Ok(mut pending_events_lock) = self.pending_events.lock() {
 +                                                      pending_events_lock.push(events::Event::ChannelClosed {
 +                                                              channel_id: *channel_id,
 +                                                              reason: ClosureReason::HolderForceClosed
 +                                                      });
 +                                              }
                                        }
                                        break Ok(());
                                },
                }
        }
  
 -      fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
 +      /// `peer_node_id` should be set when we receive a message from a peer, but not set when the
 +      /// user force-closes; this distinction is re-exposed to the user as the `ChannelClosed` reason.
 +      fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                                if let Some(short_id) = chan.get().get_short_channel_id() {
                                        channel_state.short_to_id.remove(&short_id);
                                }
 +                              let mut pending_events_lock = self.pending_events.lock().unwrap();
 +                              if peer_node_id.is_some() {
 +                                      if let Some(peer_msg) = peer_msg {
 +                                              pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
 +                                      }
 +                              } else {
 +                                      pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
 +                              }
                                chan.remove_entry().1
                        } else {
                                return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
        /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
        pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 -              match self.force_close_channel_with_peer(channel_id, None) {
 +              match self.force_close_channel_with_peer(channel_id, None, None) {
                        Ok(counterparty_node_id) => {
                                self.channel_state.lock().unwrap().pending_msg_events.push(
                                        events::MessageSendEvent::HandleError {
        }
  
        // Only public for testing, this should otherwise never be called directly
 -      pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
 +      pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, mpp_id: MppId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
                let prng_seed = self.keys_manager.get_secure_random_bytes();
                let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
                let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
  
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 -              assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes));
 +              let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
 +              let sessions = pending_outbounds.entry(mpp_id).or_insert(HashSet::new());
 +              assert!(sessions.insert(session_priv_bytes));
  
                let err: Result<(), _> = loop {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                                                path: path.clone(),
                                                session_priv: session_priv.clone(),
                                                first_hop_htlc_msat: htlc_msat,
 +                                              mpp_id,
                                        }, onion_packet, &self.logger), channel_state, chan)
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
                let mut total_value = 0;
                let our_node_id = self.get_our_node_id();
                let mut path_errs = Vec::with_capacity(route.paths.len());
 +              let mpp_id = MppId(self.keys_manager.get_secure_random_bytes());
                'path_check: for path in route.paths.iter() {
                        if path.len() < 1 || path.len() > 20 {
                                path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
                let cur_height = self.best_block.read().unwrap().height() + 1;
                let mut results = Vec::new();
                for path in route.paths.iter() {
 -                      results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, &keysend_preimage));
 +                      results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, mpp_id, &keysend_preimage));
                }
                let mut has_ok = false;
                let mut has_err = false;
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
 +                                                                                      // ChannelClosed event is generated by handle_error for us.
                                                                                        Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                        self.fail_htlc_backwards_internal(channel_state,
                                                htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
                                },
 -                              HTLCSource::OutboundRoute { session_priv, .. } => {
 -                                      if {
 -                                              let mut session_priv_bytes = [0; 32];
 -                                              session_priv_bytes.copy_from_slice(&session_priv[..]);
 -                                              self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
 -                                      } {
 -                                              self.pending_events.lock().unwrap().push(
 -                                                      events::Event::PaymentFailed {
 -                                                              payment_hash,
 -                                                              rejected_by_dest: false,
 -                                                              network_update: None,
 -#[cfg(test)]
 -                                                              error_code: None,
 -#[cfg(test)]
 -                                                              error_data: None,
 +                              HTLCSource::OutboundRoute { session_priv, mpp_id, path, .. } => {
 +                                      let mut session_priv_bytes = [0; 32];
 +                                      session_priv_bytes.copy_from_slice(&session_priv[..]);
 +                                      let mut outbounds = self.pending_outbound_payments.lock().unwrap();
 +                                      if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
 +                                              if sessions.get_mut().remove(&session_priv_bytes) {
 +                                                      self.pending_events.lock().unwrap().push(
 +                                                              events::Event::PaymentPathFailed {
 +                                                                      payment_hash,
 +                                                                      rejected_by_dest: false,
 +                                                                      network_update: None,
 +                                                                      all_paths_failed: sessions.get().len() == 0,
 +                                                                      path: path.clone(),
 +                                                                      #[cfg(test)]
 +                                                                      error_code: None,
 +                                                                      #[cfg(test)]
 +                                                                      error_data: None,
 +                                                              }
 +                                                      );
 +                                                      if sessions.get().len() == 0 {
 +                                                              sessions.remove();
                                                        }
 -                                              )
 +                                              }
                                        } else {
                                                log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                        }
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
                match source {
 -                      HTLCSource::OutboundRoute { ref path, session_priv, .. } => {
 -                              if {
 -                                      let mut session_priv_bytes = [0; 32];
 -                                      session_priv_bytes.copy_from_slice(&session_priv[..]);
 -                                      !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
 -                              } {
 +                      HTLCSource::OutboundRoute { ref path, session_priv, mpp_id, .. } => {
 +                              let mut session_priv_bytes = [0; 32];
 +                              session_priv_bytes.copy_from_slice(&session_priv[..]);
 +                              let mut outbounds = self.pending_outbound_payments.lock().unwrap();
 +                              let mut all_paths_failed = false;
 +                              if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
 +                                      if !sessions.get_mut().remove(&session_priv_bytes) {
 +                                              log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
 +                                              return;
 +                                      }
 +                                      if sessions.get().len() == 0 {
 +                                              all_paths_failed = true;
 +                                              sessions.remove();
 +                                      }
 +                              } else {
                                        log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                        return;
                                }
                                                // process_onion_failure we should close that channel as it implies our
                                                // next-hop is needlessly blaming us!
                                                self.pending_events.lock().unwrap().push(
 -                                                      events::Event::PaymentFailed {
 +                                                      events::Event::PaymentPathFailed {
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: !payment_retryable,
                                                                network_update,
 +                                                              all_paths_failed,
 +                                                              path: path.clone(),
  #[cfg(test)]
                                                                error_code: onion_error_code,
  #[cfg(test)]
                                                // TODO: For non-temporary failures, we really should be closing the
                                                // channel here as we apparently can't relay through them anyway.
                                                self.pending_events.lock().unwrap().push(
 -                                                      events::Event::PaymentFailed {
 +                                                      events::Event::PaymentPathFailed {
                                                                payment_hash: payment_hash.clone(),
                                                                rejected_by_dest: path.len() == 1,
                                                                network_update: None,
 +                                                              all_paths_failed,
 +                                                              path: path.clone(),
  #[cfg(test)]
                                                                error_code: Some(*failure_code),
  #[cfg(test)]
  
        fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
                match source {
 -                      HTLCSource::OutboundRoute { session_priv, .. } => {
 +                      HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => {
                                mem::drop(channel_state_lock);
 -                              if {
 -                                      let mut session_priv_bytes = [0; 32];
 -                                      session_priv_bytes.copy_from_slice(&session_priv[..]);
 -                                      self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
 -                              } {
 -                                      let mut pending_events = self.pending_events.lock().unwrap();
 -                                      pending_events.push(events::Event::PaymentSent {
 -                                              payment_preimage
 -                                      });
 +                              let mut session_priv_bytes = [0; 32];
 +                              session_priv_bytes.copy_from_slice(&session_priv[..]);
 +                              let mut outbounds = self.pending_outbound_payments.lock().unwrap();
 +                              let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) {
 +                                      sessions.remove(&session_priv_bytes)
 +                              } else { false };
 +                              if found_payment {
 +                                      self.pending_events.lock().unwrap().push(
 +                                              events::Event::PaymentSent { payment_preimage }
 +                                      );
                                } else {
                                        log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
                                }
                                        msg: update
                                });
                        }
 +                      self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
                }
                Ok(())
        }
                                                self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                                        }
                                },
 -                              MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
 +                              MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
                                        let mut channel_lock = self.channel_state.lock().unwrap();
                                        let channel_state = &mut *channel_lock;
                                        let by_id = &mut channel_state.by_id;
                                                                msg: update
                                                        });
                                                }
 +                                              self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: chan.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage {
                                        Err(e) => {
                                                let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
 +                                              // ChannelClosed event is generated by handle_error for us
                                                !close_channel
                                        }
                                }
                                                                });
                                                        }
  
 +                                                      if let Ok(mut pending_events_lock) = self.pending_events.lock() {
 +                                                              pending_events_lock.push(events::Event::ChannelClosed {
 +                                                                      channel_id: *channel_id,
 +                                                                      reason: ClosureReason::CooperativeClosure
 +                                                              });
 +                                                      }
 +
                                                        log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                        self.tx_broadcaster.broadcast_transaction(&tx);
                                                        false
@@@ -4529,7 -4436,6 +4529,7 @@@ wher
                                                        msg: update
                                                });
                                        }
 +                                      self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
@@@ -4720,7 -4626,6 +4720,7 @@@ impl<Signer: Sign, M: Deref , T: Deref 
                                                                msg: update
                                                        });
                                                }
 +                                              self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
                                                false
                                        } else {
                                                true
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
                                                        }
 +                                                      self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
                                                        return false;
                                                } else {
                                                        no_channels_remain = false;
                        for chan in self.list_channels() {
                                if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
 -                                      let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
 +                                      let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
                                }
                        }
                } else {
                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
 -                      let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
 +                      let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
                }
        }
  }
@@@ -4933,74 -4837,10 +4933,74 @@@ impl_writeable_tlv_based!(PendingHTLCIn
        (8, outgoing_cltv_value, required)
  });
  
 -impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
 -      (0, Relay),
 -      (1, Malformed),
 -);
 +
 +impl Writeable for HTLCFailureMsg {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 +              match self {
 +                      HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
 +                              0u8.write(writer)?;
 +                              channel_id.write(writer)?;
 +                              htlc_id.write(writer)?;
 +                              reason.write(writer)?;
 +                      },
 +                      HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 +                              channel_id, htlc_id, sha256_of_onion, failure_code
 +                      }) => {
 +                              1u8.write(writer)?;
 +                              channel_id.write(writer)?;
 +                              htlc_id.write(writer)?;
 +                              sha256_of_onion.write(writer)?;
 +                              failure_code.write(writer)?;
 +                      },
 +              }
 +              Ok(())
 +      }
 +}
 +
 +impl Readable for HTLCFailureMsg {
 +      fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
 +              let id: u8 = Readable::read(reader)?;
 +              match id {
 +                      0 => {
 +                              Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 +                                      channel_id: Readable::read(reader)?,
 +                                      htlc_id: Readable::read(reader)?,
 +                                      reason: Readable::read(reader)?,
 +                              }))
 +                      },
 +                      1 => {
 +                              Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 +                                      channel_id: Readable::read(reader)?,
 +                                      htlc_id: Readable::read(reader)?,
 +                                      sha256_of_onion: Readable::read(reader)?,
 +                                      failure_code: Readable::read(reader)?,
 +                              }))
 +                      },
 +                      // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
 +                      // weren't length-prefixed, so the TLV stream suffix of the network messages contained in
 +                      // the variants could not be read back.
 +                      // Version 0.0.101 added read support for the length-prefixed types 2 and 3 below; we
 +                      // should migrate to writing those variants once UpdateFailHTLC or
 +                      // UpdateFailMalformedHTLC gain TLV fields.
 +                      2 => {
 +                              let length: BigSize = Readable::read(reader)?;
 +                              let mut s = FixedLengthReader::new(reader, length.0);
 +                              let res = Readable::read(&mut s)?;
 +                              s.eat_remaining()?; // Returns DecodeError::ShortRead if there aren't actually enough bytes
 +                              Ok(HTLCFailureMsg::Relay(res))
 +                      },
 +                      3 => {
 +                              let length: BigSize = Readable::read(reader)?;
 +                              let mut s = FixedLengthReader::new(reader, length.0);
 +                              let res = Readable::read(&mut s)?;
 +                              s.eat_remaining()?; // Returns DecodeError::ShortRead if there aren't actually enough bytes
 +                              Ok(HTLCFailureMsg::Malformed(res))
 +                      },
 +                      _ => Err(DecodeError::UnknownRequiredFeature),
 +              }
 +      }
 +}
 +
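
Types 2 and 3 reserve a length-prefixed encoding so a future writer can append TLV fields that the `eat_remaining` call above will skip. A sketch of what such a future writer would emit, reusing the in-scope `BigSize` and `Writer` types (the function itself is hypothetical):

    fn write_length_prefixed_relay<W: Writer>(msg: &msgs::UpdateFailHTLC, w: &mut W) -> Result<(), io::Error> {
        2u8.write(w)?;                        // length-prefixed Relay discriminant
        let body = msg.encode();              // serialize the inner message first...
        BigSize(body.len() as u64).write(w)?; // ...so its length can be prefixed
        w.write_all(&body)
    }
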
  impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
        (0, Forward),
        (1, Fail),
@@@ -5071,60 -4911,14 +5071,60 @@@ impl Readable for ClaimableHTLC 
        }
  }
  
 -impl_writeable_tlv_based_enum!(HTLCSource,
 -      (0, OutboundRoute) => {
 -              (0, session_priv, required),
 -              (2, first_hop_htlc_msat, required),
 -              (4, path, vec_type),
 -      }, ;
 -      (1, PreviousHopData)
 -);
 +impl Readable for HTLCSource {
 +      fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
 +              let id: u8 = Readable::read(reader)?;
 +              match id {
 +                      0 => {
 +                              let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
 +                              let mut first_hop_htlc_msat: u64 = 0;
 +                              let mut path = Some(Vec::new());
 +                              let mut mpp_id = None;
 +                              read_tlv_fields!(reader, {
 +                                      (0, session_priv, required),
 +                                      (1, mpp_id, option),
 +                                      (2, first_hop_htlc_msat, required),
 +                                      (4, path, vec_type),
 +                              });
 +                              if mpp_id.is_none() {
 +                                      // For backwards compat, if there was no mpp_id written, use the session_priv bytes
 +                                      // instead.
 +                                      mpp_id = Some(MppId(*session_priv.0.unwrap().as_ref()));
 +                              }
 +                              Ok(HTLCSource::OutboundRoute {
 +                                      session_priv: session_priv.0.unwrap(),
 +                                      first_hop_htlc_msat: first_hop_htlc_msat,
 +                                      path: path.unwrap(),
 +                                      mpp_id: mpp_id.unwrap(),
 +                              })
 +                      }
 +                      1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
 +                      _ => Err(DecodeError::UnknownRequiredFeature),
 +              }
 +      }
 +}
 +
 +impl Writeable for HTLCSource {
 +      fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::io::Error> {
 +              match self {
 +                      HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, mpp_id } => {
 +                              0u8.write(writer)?;
 +                              let mpp_id_opt = Some(mpp_id);
 +                              write_tlv_fields!(writer, {
 +                                      (0, session_priv, required),
 +                                      (1, mpp_id_opt, option),
 +                                      (2, first_hop_htlc_msat, required),
 +                                      (4, path, vec_type),
 +                               });
 +                      }
 +                      HTLCSource::PreviousHopData(ref field) => {
 +                              1u8.write(writer)?;
 +                              field.write(writer)?;
 +                      }
 +              }
 +              Ok(())
 +      }
 +}
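
Note that `mpp_id` rides in TLV type 1: per the odd-type-is-optional convention, serializers that predate it simply never wrote the field, and the read side above backfills it from the session_priv bytes. A standalone sketch of that fallback (illustration only, not LDK API):

    fn default_mpp_id(session_priv: &SecretKey, read_mpp_id: Option<MppId>) -> MppId {
        // Pre-0.0.101 data carries no mpp_id TLV, so reuse the session key bytes;
        // this keeps single-path payments keyed consistently across an upgrade.
        read_mpp_id.unwrap_or_else(|| MppId(*session_priv.as_ref()))
    }
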
  
  impl_writeable_tlv_based_enum!(HTLCFailReason,
        (0, LightningError) => {
@@@ -5245,21 -5039,12 +5245,21 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                }
  
                let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
 -              (pending_outbound_payments.len() as u64).write(writer)?;
 -              for session_priv in pending_outbound_payments.iter() {
 -                      session_priv.write(writer)?;
 +              // For backwards compat, write the session privs and their total length.
 +              let mut num_pending_outbounds_compat: u64 = 0;
 +              for (_, outbounds) in pending_outbound_payments.iter() {
 +                      num_pending_outbounds_compat += outbounds.len() as u64;
 +              }
 +              num_pending_outbounds_compat.write(writer)?;
 +              for (_, outbounds) in pending_outbound_payments.iter() {
 +                      for outbound in outbounds.iter() {
 +                              outbound.write(writer)?;
 +                      }
                }
  
 -              write_tlv_fields!(writer, {});
 +              write_tlv_fields!(writer, {
 +                      (1, pending_outbound_payments, required),
 +              });
  
                Ok(())
        }
@@@ -5396,7 -5181,6 +5396,7 @@@ impl<'a, Signer: Sign, M: Deref, T: Der
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
                let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                                        let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
 +                                      channel_closures.push(events::Event::ChannelClosed {
 +                                              channel_id: channel.channel_id(),
 +                                              reason: ClosureReason::OutdatedChannelManager
 +                                      });
                                } else {
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                                short_to_id.insert(short_channel_id, channel.channel_id());
                                None => continue,
                        }
                }
+               if forward_htlcs_count > 0 {
+                       // If we have pending HTLCs to forward, assume we either dropped a
+                       // `PendingHTLCsForwardable` or the user received it but never processed it as they
+                       // shut down before the timer hit. Either way, set the time_forwardable to a small
+                       // constant as enough time has likely passed that we should simply handle the forwards
+                       // now, or at least after the user gets a chance to reconnect to our peers.
+                       pending_events_read.push(events::Event::PendingHTLCsForwardable {
+                               time_forwardable: Duration::from_secs(2),
+                       });
+               }
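
The regenerated event asks the user to call back into the manager once `time_forwardable` elapses; a minimal single-threaded handler sketch (assuming a `channel_manager` in scope; `process_pending_htlc_forwards` is the call the event exists to trigger):

    match event {
        Event::PendingHTLCsForwardable { time_forwardable } => {
            // time_forwardable is only a lower bound (batching forwards helps
            // privacy); a simple event loop can just sleep before driving them.
            std::thread::sleep(time_forwardable);
            channel_manager.process_pending_htlc_forwards();
        },
        _ => { /* other events */ },
    }
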
  
                let background_event_count: u64 = Readable::read(reader)?;
                let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
                        }
                }
  
 -              let pending_outbound_payments_count: u64 = Readable::read(reader)?;
 -              let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32));
 -              for _ in 0..pending_outbound_payments_count {
 -                      if !pending_outbound_payments.insert(Readable::read(reader)?) {
 -                              return Err(DecodeError::InvalidValue);
 -                      }
 +              let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
 +              let mut pending_outbound_payments_compat: HashMap<MppId, HashSet<[u8; 32]>> =
 +                      HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
 +              for _ in 0..pending_outbound_payments_count_compat {
 +                      let session_priv = Readable::read(reader)?;
 +                      if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() {
 +                              return Err(DecodeError::InvalidValue)
 +                      };
                }
  
 -              read_tlv_fields!(reader, {});
 +              let mut pending_outbound_payments = None;
 +              read_tlv_fields!(reader, {
 +                      (1, pending_outbound_payments, option),
 +              });
 +              if pending_outbound_payments.is_none() {
 +                      pending_outbound_payments = Some(pending_outbound_payments_compat);
 +              }
  
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
  
 +              if !channel_closures.is_empty() {
 +                      pending_events_read.append(&mut channel_closures);
 +              }
 +
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
                                pending_msg_events: Vec::new(),
                        }),
                        pending_inbound_payments: Mutex::new(pending_inbound_payments),
 -                      pending_outbound_payments: Mutex::new(pending_outbound_payments),
 +                      pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
  
                        our_network_key: args.keys_manager.get_node_secret(),
                        our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
@@@ -5596,7 -5374,7 +5606,7 @@@ mod tests 
        use bitcoin::hashes::sha256::Hash as Sha256;
        use core::time::Duration;
        use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 -      use ln::channelmanager::PaymentSendFailure;
 +      use ln::channelmanager::{MppId, PaymentSendFailure};
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
        use ln::msgs;
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
                let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
                let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
 +              let mpp_id = MppId([42; 32]);
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
 -              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                expect_payment_failed!(nodes[0], our_payment_hash, true);
  
                // Send the second half of the original MPP payment.
 -              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
                check_added_monitors!(nodes[0], 1);
  
 -              // There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here.
 +              // Note that a successful MPP payment generates a single PaymentSent event upon the first
 +              // path's success; no further events are generated for subsequent path successes.
                let events = nodes[0].node.get_and_clear_pending_events();
                match events[0] {
                        Event::PaymentSent { payment_preimage: ref preimage } => {
                                assert_eq!(payment_preimage, *preimage);
                        },
                        _ => panic!("Unexpected event"),
 -              }
 -              match events[1] {
 -                      Event::PaymentSent { payment_preimage: ref preimage } => {
 -                              assert_eq!(payment_preimage, *preimage);
 -                      },
 -                      _ => panic!("Unexpected event"),
                }
        }
  
index 3be75abbb0a1ff03be9de394b897cdb023a9fc9b,3c2a61e3aa2fa6ec8aa56a34506d2723d0f788e6..4b0016199de1a453cdd1caf88a7a8c56b5367443
@@@ -19,7 -19,7 +19,7 @@@ use chain::transaction::OutPoint
  use chain::keysinterface::BaseSign;
  use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
  use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 -use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
 +use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MppId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
  use ln::channel::{Channel, ChannelError};
  use ln::{chan_utils, onion_utils};
  use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
@@@ -30,7 -30,7 +30,7 @@@ use ln::msgs
  use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::{byte_utils, test_utils};
 -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
  use util::errors::APIError;
  use util::ser::{Writeable, ReadableArgs};
  use util::config::UserConfig;
@@@ -638,7 -638,6 +638,7 @@@ fn test_update_fee_that_funder_cannot_a
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
  }
  
  #[test]
@@@ -739,8 -738,6 +739,8 @@@ fn test_update_fee_with_fundee_update_a
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
  }
  
  #[test]
@@@ -853,8 -850,6 +853,8 @@@ fn test_update_fee() 
        assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
  }
  
  #[test]
@@@ -982,20 -977,10 +982,20 @@@ fn fake_network_test() 
  
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
  }
  
  #[test]
@@@ -1191,7 -1176,6 +1191,7 @@@ fn test_duplicate_htlc_different_direct
  
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
  
        // Check we only broadcast 1 timeout tx
@@@ -1473,7 -1457,6 +1473,7 @@@ fn test_chan_reserve_violation_inbound_
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
  }
  
  #[test]
@@@ -1497,7 -1480,7 +1497,7 @@@ fn test_chan_reserve_dust_inbound_htlcs
        push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
  
 -      let dust_amt = crate::ln::channel::MIN_DUST_LIMIT_SATOSHIS * 1000
 +      let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
                + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1;
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
@@@ -1600,7 -1583,6 +1600,7 @@@ fn test_chan_reserve_violation_inbound_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
  }
  
  #[test]
@@@ -2057,8 -2039,6 +2057,8 @@@ fn channel_monitor_network_test() 
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 +      check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
  
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
  
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr) => {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
  
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
        assert_eq!(nodes[4].node.list_channels().len(), 0);
  
        nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 +      check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  #[test]
@@@ -2247,7 -2221,6 +2247,7 @@@ fn test_justice_tx() 
                        node_txn.truncate(1);
                }
                check_added_monitors!(nodes[1], 1);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
  
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
  
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@@ -2329,7 -2299,6 +2329,7 @@@ fn revoked_output_claim() 
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
  
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
 -      check_added_monitors!(nodes[0], 1)
 +      check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  #[test]
@@@ -2377,10 -2345,8 +2377,10 @@@ fn claim_htlc_outputs_shared_tx() 
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
  
@@@ -2437,13 -2403,7 +2437,13 @@@ fn claim_htlc_outputs_single_tx() 
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
 -              expect_pending_htlcs_forwardable_ignore!(nodes[0]);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
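 +              // nodes[0] now has two pending events: the HTLC-forwarding wakeup and the channel
 +              // closure. Pull them at once and check each.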
 +              let mut events = nodes[0].node.get_and_clear_pending_events();
 +              expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
 +              match events[1] {
 +                      Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +                      _ => panic!("Unexpected event"),
 +              }
  
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
@@@ -2541,7 -2501,6 +2541,7 @@@ fn test_htlc_on_chain_success() 
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 5);
        assert_eq!(node_txn[0], node_txn[3]);
                added_monitors.clear();
        }
        let forwarded_events = nodes[1].node.get_and_clear_pending_events();
 -      assert_eq!(forwarded_events.len(), 2);
 -      if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
 -              } else { panic!(); }
 +      assert_eq!(forwarded_events.len(), 3);
 +      match forwarded_events[0] {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
        if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
                } else { panic!(); }
 +      if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] {
 +              } else { panic!(); }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
        let commitment_spend =
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
 -      assert_eq!(events.len(), 2);
 +      assert_eq!(events.len(), 3);
        let mut first_claimed = false;
        for event in events {
                match event {
                                        assert_eq!(payment_preimage, our_payment_preimage_2);
                                }
                        },
 +                      Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
                        _ => panic!("Unexpected event"),
                }
        }
@@@ -2747,7 -2700,6 +2747,7 @@@ fn do_test_htlc_on_chain_timeout(connec
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan_2.3);
        // Verify that B's ChannelManager detects that the HTLC has timed out via its own commitment tx and fails it backwards in response
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        mine_transaction(&nodes[1], &commitment_tx[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let timeout_tx;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
  
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], chan_1.3);
@@@ -2864,7 -2814,6 +2864,7 @@@ fn test_simple_commitment_revoked_fail_
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
  
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
@@@ -3014,19 -2963,15 +3014,19 @@@ fn do_test_commitment_revoked_fail_back
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let events = nodes[1].node.get_and_clear_pending_events();
 -      assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
 +      assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
        match events[0] {
 -              Event::PaymentFailed { ref payment_hash, .. } => {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
 +              _ => panic!("Unexepected event"),
 +      }
 +      match events[1] {
 +              Event::PaymentPathFailed { ref payment_hash, .. } => {
                        assert_eq!(*payment_hash, fourth_payment_hash);
                },
                _ => panic!("Unexpected event"),
        }
        if !deliver_bs_raa {
 -              match events[1] {
 +              match events[2] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 3);
                        match events[0] {
 -                              Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
 +                              Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        // If we delivered B's RAA we got an unknown preimage error, not something
                                        // that we should update our routing table for.
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
 -                              Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
 +                              Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[2] {
 -                              Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
 +                              Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
@@@ -3186,21 -3131,9 +3186,21 @@@ fn fail_backward_pending_htlc_upon_chan
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
 -
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 2);
        // Check that Alice fails backward the pending HTLC from the second payment.
 -      expect_payment_failed!(nodes[0], failed_payment_hash, true);
 +      match events[0] {
 +              Event::PaymentPathFailed { payment_hash, .. } => {
 +                      assert_eq!(payment_hash, failed_payment_hash);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
 +                      assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
 +              },
 +              _ => panic!("Unexpected event {:?}", events[1]),
 +      }
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  }
@@@ -3220,7 -3153,6 +3220,7 @@@ fn test_htlc_ignore_latest_remote_commi
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3);
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@@ -3285,7 -3216,6 +3285,7 @@@ fn test_force_close_fail_back() 
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@@ -3379,7 -3308,7 +3379,7 @@@ fn test_simple_peer_disconnect() 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
  
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
 -      fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
 +      fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
  
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
        {
                        _ => panic!("Unexpected event"),
                }
                match events[1] {
 -                      Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
 +                      Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                                assert!(rejected_by_dest);
                        },
@@@ -3957,8 -3886,7 +3957,8 @@@ fn do_test_htlc_timeout(send_partial_mp
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
 -              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, &None).unwrap();
 +              let mpp_id = MppId([42; 32]);
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, mpp_id, &None).unwrap();
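 +              // The 200_000 msat total_msat exceeds what this one path delivers, so nodes[1] holds the
 +              // HTLC as part of a not-yet-complete MPP payment rather than failing it immediately.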
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@@ -4155,34 -4083,6 +4155,34 @@@ fn test_no_txn_manager_serialize_deseri
        send_payment(&nodes[0], &[&nodes[1]], 1000000);
  }
  
 +#[test]
 +fn mpp_failure() {
 +      let chanmon_cfgs = create_chanmon_cfgs(4);
 +      let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 +      let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 +
 +      let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let logger = test_utils::TestLogger::new();
 +
 +      let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
 +      let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
 +      let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
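 +      // Duplicate the single-path route and pin each copy to its own side of the diamond
 +      // (0 -> 1 -> 3 and 0 -> 2 -> 3) so the payment is split across two known paths.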
 +      let path = route.paths[0].clone();
 +      route.paths.push(path);
 +      route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
 +      route.paths[0][0].short_channel_id = chan_1_id;
 +      route.paths[0][1].short_channel_id = chan_3_id;
 +      route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
 +      route.paths[1][0].short_channel_id = chan_2_id;
 +      route.paths[1][1].short_channel_id = chan_4_id;
 +      send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
 +      fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
 +}
 +
  #[test]
  fn test_dup_htlc_onchain_fails_on_reload() {
        // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
        //
        // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
        // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
 -      // PaymentFailed event appearing). However, because we may not serialize the relevant
 +      // PaymentPathFailed event appearing). However, because we may not serialize the relevant
        // ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
        // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
        // and de-duplicates ChannelMonitor events.
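 +      // (A rough sketch of that tracking, rather than the exact code: remember the set of HTLC
 +      // sources already resolved on-chain and ignore any later ChannelMonitor event whose
 +      // source is already in the set.)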
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
  
        header.prev_blockhash = nodes[0].best_block_hash();
@@@ -4577,7 -4475,6 +4577,7 @@@ fn test_manager_serialize_deserialize_i
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
 +      check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
  
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@@ -4641,7 -4538,6 +4641,7 @@@ fn test_claim_sizeable_push_msat() 
        nodes[1].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@@ -4670,7 -4566,6 +4670,7 @@@ fn test_claim_on_remote_sizeable_push_m
        nodes[0].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@@ -4708,7 -4602,6 +4708,7 @@@ fn test_claim_on_remote_revoked_sizeabl
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        mine_transaction(&nodes[1], &node_txn[0]);
@@@ -4761,7 -4654,6 +4761,7 @@@ fn test_static_spendable_outputs_preima
        check_spends!(node_txn[2], node_txn[1]);
  
        mine_transaction(&nodes[1], &node_txn[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@@ -4806,7 -4698,6 +4806,7 @@@ fn test_static_spendable_outputs_timeou
        assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
  
        mine_transaction(&nodes[1], &node_txn[1]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[1], our_payment_hash, true);
  
@@@ -4837,7 -4728,6 +4837,7 @@@ fn test_static_spendable_outputs_justic
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@@ -4874,7 -4764,6 +4874,7 @@@ fn test_static_spendable_outputs_justic
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
  
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
@@@ -4947,7 -4835,6 +4947,7 @@@ fn test_static_spendable_outputs_justic
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
  
        assert_eq!(revoked_htlc_txn.len(), 2);
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
@@@ -5045,7 -4931,6 +5045,7 @@@ fn test_onchain_to_onchain_claim() 
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
  
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 3);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
        check_added_monitors!(nodes[1], 1);
 -      expect_payment_forwarded!(nodes[1], Some(1000), true);
 +      let events = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 2);
 +      match events[0] {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
 +                      assert_eq!(fee_earned_msat, Some(1000));
 +                      assert_eq!(claim_from_onchain_tx, true);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
        {
                let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // ChannelMonitor: claim tx
                check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
                b_txn.clear();
        }
 +      check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
 -      check_added_monitors!(nodes[1], 1);
        match msg_events[0] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
        assert_eq!(b_txn.len(), 3);
@@@ -5164,7 -5036,6 +5164,7 @@@ fn test_duplicate_payment_hash_one_fail
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
  
        let htlc_timeout_tx;
        nodes[2].node.claim_funds(our_payment_preimage);
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@@ -5279,7 -5149,6 +5279,7 @@@ fn test_dynamic_spendable_outputs_local
        check_added_monitors!(nodes[1], 1);
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@@ -5450,26 -5319,9 +5450,26 @@@ fn do_test_fail_backwards_unrevoked_rem
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
        }
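 +      // If the last RAA was delivered, a PendingHTLCsForwardable event precedes the ChannelClosed
 +      // event; otherwise the closure is the only event pending.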
 +      let events = nodes[2].node.get_and_clear_pending_events();
 +      let close_event = if deliver_last_raa {
 +              assert_eq!(events.len(), 2);
 +              events[1].clone()
 +      } else {
 +              assert_eq!(events.len(), 1);
 +              events[0].clone()
 +      };
 +      match close_event {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
 +
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
 -      expect_pending_htlcs_forwardable!(nodes[2]);
 +      if deliver_last_raa {
 +              expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
 +      } else {
 +              expect_pending_htlcs_forwardable!(nodes[2]);
 +      }
        check_added_monitors!(nodes[2], 3);
  
        let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
        let mut as_failds = HashSet::new();
        let mut as_updates = 0;
        for event in as_events.iter() {
 -              if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
 +              if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(as_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_2 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
        let mut bs_failds = HashSet::new();
        let mut bs_updates = 0;
        for event in bs_events.iter() {
 -              if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
 +              if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(bs_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
@@@ -5606,7 -5458,6 +5606,7 @@@ fn test_dynamic_spendable_outputs_local
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
  
        let htlc_timeout = {
@@@ -5690,7 -5541,6 +5690,7 @@@ fn test_key_derivation_params() 
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -5731,7 -5581,6 +5731,7 @@@ fn test_static_output_closing_tx() 
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
  
        mine_transaction(&nodes[0], &closing_tx);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        check_spends!(spend_txn[0], closing_tx);
  
        mine_transaction(&nodes[1], &closing_tx);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@@ -5790,7 -5638,6 +5790,7 @@@ fn do_htlc_claim_local_commitment_only(
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@@ -5985,7 -5830,7 +5985,7 @@@ fn bolt2_open_channel_sane_dust_limit(
        let push_msat=10001;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
        let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      node0_to_1_send_open_channel.dust_limit_satoshis = 661;
 +      node0_to_1_send_open_channel.dust_limit_satoshis = 547;
        node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
  
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
                },
                _ => panic!("Unexpected event"),
        };
 -      assert_eq!(err_msg.data, "dust_limit_satoshis (661) is greater than the implementation limit (660)");
 +      assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
  }
  
  // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
@@@ -6068,10 -5913,9 +6068,10 @@@ fn test_fail_holding_cell_htlc_upon_fre
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
 -              &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => {
 +              &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(our_payment_hash.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
 +                      assert_eq!(*all_paths_failed, true);
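 +                      // This was the payment's only path, so its failure fails the payment as a whole.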
                        assert_eq!(*network_update, None);
                        assert_eq!(*error_code, None);
                        assert_eq!(*error_data, None);
@@@ -6155,10 -5999,9 +6155,10 @@@ fn test_free_and_fail_holding_cell_htlc
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
 -              &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => {
 +              &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(payment_hash_2.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
 +                      assert_eq!(*all_paths_failed, true);
                        assert_eq!(*network_update, None);
                        assert_eq!(*error_code, None);
                        assert_eq!(*error_data, None);
@@@ -6412,7 -6255,6 +6412,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
  }
  
  #[test]
@@@ -6540,7 -6382,6 +6540,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6577,7 -6418,6 +6577,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6622,7 -6462,6 +6622,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6648,7 -6487,6 +6648,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6674,7 -6512,6 +6674,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6724,7 -6561,6 +6724,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6758,7 -6594,6 +6758,7 @@@ fn test_update_fulfill_htlc_bolt2_updat
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6792,7 -6627,6 +6792,7 @@@ fn test_update_fulfill_htlc_bolt2_updat
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6826,7 -6660,6 +6826,7 @@@ fn test_update_fulfill_htlc_bolt2_updat
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6868,7 -6701,6 +6868,7 @@@ fn test_update_fulfill_htlc_bolt2_incor
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6910,7 -6742,6 +6910,7 @@@ fn test_update_fulfill_htlc_bolt2_wrong
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6959,7 -6790,6 +6959,7 @@@ fn test_update_fulfill_htlc_bolt2_missi
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -7102,17 -6932,16 +7102,17 @@@ fn do_test_failure_delay_dust_htlc_loca
  
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        let events = nodes[0].node.get_and_clear_pending_events();
 -      // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx
 +      // Only 2 PaymentPathFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
        assert_eq!(events.len(), 2);
        let mut first_failed = false;
        for event in events {
                match event {
 -                      Event::PaymentFailed { payment_hash, .. } => {
 +                      Event::PaymentPathFailed { payment_hash, .. } => {
                                if payment_hash == payment_hash_1 {
                                        assert!(!first_failed);
                                        first_failed = true;
@@@ -7163,7 -6992,6 +7163,7 @@@ fn do_test_sweep_outbound_htlc_failure_
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[0], dust_hash, true);
  
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
                        assert_eq!(events.len(), 2);
                        let first;
                        match events[0] {
 -                              Event::PaymentFailed { payment_hash, .. } => {
 +                              Event::PaymentPathFailed { payment_hash, .. } => {
                                        if payment_hash == dust_hash { first = true; }
                                        else { first = false; }
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
 -                              Event::PaymentFailed { payment_hash, .. } => {
 +                              Event::PaymentPathFailed { payment_hash, .. } => {
                                        if first { assert_eq!(payment_hash, non_dust_hash); }
                                        else { assert_eq!(payment_hash, dust_hash); }
                                },
@@@ -7265,17 -7092,14 +7265,17 @@@ fn test_user_configurable_csv_delay() 
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        accept_channel.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
 +      let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
                match action {
                        &ErrorAction::SendErrorMessage { ref msg } => {
                                assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
 +                              reason_msg = msg.data.clone();
                        },
 -                      _ => { assert!(false); }
 +                      _ => { panic!(); }
                }
 -      } else { assert!(false); }
 +      } else { panic!(); }
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
  
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
@@@ -7388,10 -7212,10 +7388,10 @@@ fn test_data_loss_protect() 
  
        // Check we close channel detecting A is fallen-behind
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
        check_added_monitors!(nodes[1], 1);
  
 -
        // Check A is able to claim to_remote output
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
        assert_eq!(node_txn[0].output.len(), 2);
        mine_transaction(&nodes[0], &node_txn[0]);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
@@@ -7842,7 -7665,6 +7842,7 @@@ fn test_bump_penalty_txn_on_revoked_htl
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
  
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
 -      expect_pending_htlcs_forwardable_ignore!(nodes[0]);
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
 +      match events[1] {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
        let first;
        let feerate_1;
        let penalty_txn;
@@@ -8115,7 -7932,6 +8115,7 @@@ fn test_counterparty_raa_skip_no_crash(
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
  }
  
  #[test]
@@@ -8148,7 -7964,6 +8148,7 @@@ fn test_bump_txn_sanitize_tracking_maps
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
@@@ -8632,7 -8447,6 +8632,7 @@@ fn test_pre_lockin_no_chan_closed_updat
        let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() });
  }
  
  #[test]
@@@ -8667,7 -8481,6 +8667,7 @@@ fn test_htlc_no_detection() 
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
  
        let htlc_timeout = {
@@@ -8727,7 -8540,6 +8727,7 @@@ fn do_test_onchain_htlc_settlement_afte
        nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
 +      check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
 +                      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                assert_eq!(bob_txn.len(), 1);
                check_spends!(bob_txn[0], chan_ab.3);
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
 +                      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@@ -9036,7 -8846,6 +9036,7 @@@ fn test_error_chans_closed() 
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
 +      check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@@ -9111,7 -8919,6 +9111,7 @@@ fn test_invalid_funding_tx() 
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
  
        confirm_transaction_at(&nodes[1], &tx, 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@@ -9155,7 -8962,6 +9155,7 @@@ fn do_test_tx_confirmed_skipping_blocks
  
        nodes[1].node.force_close_channel(&channel_id).unwrap();
        check_closed_broadcast!(nodes[1], true);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@@ -9208,6 -9014,125 +9208,125 @@@ fn test_tx_confirmed_skipping_blocks_im
        do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
  }
  
+ #[test]
+ fn test_forwardable_regen() {
+       // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the
+       // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive
+       // HTLCs.
+       // We test it for both payment receipt and payment forwarding.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let persister: test_utils::TestPersister;
+       let new_chain_monitor: test_utils::TestChainMonitor;
+       let nodes_1_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+       // First send a payment to nodes[1]
+       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       // Next send a payment which is forwarded by nodes[1]
+       let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
+       nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+       // There is already a PendingHTLCsForwardable event "pending", so another one will not be
+       // generated.
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+       // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable event.
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       let nodes_1_serialized = nodes[1].node.encode();
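+       // Serialize both ChannelMonitors so they can be handed back to ChannelManager::read below.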
+       let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+       let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+       {
+               let monitors = nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap();
+               let mut monitor_iter = monitors.iter();
+               monitor_iter.next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+               monitor_iter.next().unwrap().1.write(&mut chan_1_monitor_serialized).unwrap();
+       }
+       persister = test_utils::TestPersister::new();
+       let keys_manager = &chanmon_cfgs[1].keys_manager;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
+       nodes[1].chain_monitor = &new_chain_monitor;
+       let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+       let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+               &mut chan_0_monitor_read, keys_manager).unwrap();
+       assert!(chan_0_monitor_read.is_empty());
+       let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..];
+       let (_, mut chan_1_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+               &mut chan_1_monitor_read, keys_manager).unwrap();
+       assert!(chan_1_monitor_read.is_empty());
+       let mut nodes_1_read = &nodes_1_serialized[..];
+       let (_, nodes_1_deserialized_tmp) = {
+               let mut channel_monitors = HashMap::new();
+               channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+               channel_monitors.insert(chan_1_monitor.get_funding_txo().0, &mut chan_1_monitor);
+               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
+                       default_config: UserConfig::default(),
+                       keys_manager,
+                       fee_estimator: node_cfgs[1].fee_estimator,
+                       chain_monitor: nodes[1].chain_monitor,
+                       tx_broadcaster: nodes[1].tx_broadcaster.clone(),
+                       logger: nodes[1].logger,
+                       channel_monitors,
+               }).unwrap()
+       };
+       nodes_1_deserialized = nodes_1_deserialized_tmp;
+       assert!(nodes_1_read.is_empty());
+       assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor).is_ok());
+       nodes[1].node = &nodes_1_deserialized;
+       check_added_monitors!(nodes[1], 2);
+       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       // Note that nodes[1] and nodes[2] resend their funding_locked here since they haven't updated
+       // the commitment state.
+       reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
+       check_added_monitors!(nodes[1], 1);
+       let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
+       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+ }
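
On the consumer side, the regenerated event is handled exactly like a freshly generated one: wait roughly `time_forwardable`, then drive the forwarding state machine. A minimal sketch of that pattern, assuming a thread-based runtime and a cloneable `channel_manager` handle (both assumptions, not part of this change):

	// Sketch only: `channel_manager` is an Arc around the user's ChannelManager.
	match event {
		Event::PendingHTLCsForwardable { time_forwardable } => {
			// LDK asks us to wait at least `time_forwardable` so HTLCs received
			// around the same time are forwarded in a single batch, which is
			// better for privacy.
			let manager = std::sync::Arc::clone(&channel_manager);
			std::thread::spawn(move || {
				std::thread::sleep(time_forwardable);
				manager.process_pending_htlc_forwards();
			});
		},
		_ => { /* other events handled elsewhere */ },
	}
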
  #[test]
  fn test_keysend_payments_to_public_node() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
@@@ -9263,3 -9188,116 +9382,3 @@@ fn test_keysend_payments_to_private_nod
        pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
        claim_payment(&nodes[0], &path, test_preimage);
  }
 -
 -fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, on_holder_tx: bool) {
 -      // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` policy.
 -      //
 -      // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
 -      // trimmed-to-dust HTLC outbound balance and this new payment as included on next counterparty
 -      // commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the update.
 -      // At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC inbound
 -      // and trimmed-to-dust HTLC outbound balance and this new received HTLC as included on next
 -      // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail the update.
 -      // Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel might be
 -      // available again for HTLC processing once the dust bandwidth has cleared up.
 -
 -      let chanmon_cfgs = create_chanmon_cfgs(2);
 -      let mut config = test_default_channel_config();
 -      config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
 -      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 -      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
 -      let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 -
 -      nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
 -      let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -      open_channel.max_htlc_value_in_flight_msat = 50_000_000;
 -      open_channel.max_accepted_htlcs = 60;
 -      nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
 -      let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 -      if on_holder_tx {
 -              accept_channel.dust_limit_satoshis = 660;
 -      }
 -      nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
 -
 -      let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42);
 -
 -      if on_holder_tx {
 -              if let Some(mut chan) = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
 -                      chan.holder_dust_limit_satoshis = 660;
 -              }
 -      }
 -
 -      nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
 -      nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
 -      check_added_monitors!(nodes[1], 1);
 -
 -      nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
 -      check_added_monitors!(nodes[0], 1);
 -
 -      let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
 -      let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
 -      update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
 -
 -      if on_holder_tx {
 -              if dust_outbound_balance {
 -                      for i in 0..2 {
 -                              let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 2_300_000);
 -                              if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
 -                      }
 -              } else {
 -                      for _ in 0..2 {
 -                              route_payment(&nodes[0], &[&nodes[1]], 2_300_000);
 -                      }
 -              }
 -      } else {
 -              if dust_outbound_balance {
 -                      for i in 0..25 {
 -                              let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 200_000); // + 177_000 msat of HTLC-success tx at 253 sats/kWU
 -                              if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
 -                      }
 -              } else {
 -                      for _ in 0..25 {
 -                              route_payment(&nodes[0], &[&nodes[1]], 200_000); // + 167_000 msat of HTLC-timeout tx at 253 sats/kWU
 -                      }
 -              }
 -      }
 -
 -      if at_forward {
 -              let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { 2_300_000 } else { 200_000 });
 -              let mut config = UserConfig::default();
 -              if on_holder_tx {
 -                      unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat)));
 -              } else {
 -                      unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat)));
 -              }
 -      } else {
 -              let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1 ], if on_holder_tx { 2_300_000 } else { 200_000 });
 -              nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
 -              check_added_monitors!(nodes[0], 1);
 -              let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 -              assert_eq!(events.len(), 1);
 -              let payment_event = SendEvent::from_event(events.remove(0));
 -              nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 -              if on_holder_tx {
 -                      nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
 -              } else {
 -                      nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
 -              }
 -      }
 -
 -      let _ = nodes[1].node.get_and_clear_pending_msg_events();
 -      let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 -      added_monitors.clear();
 -}
 -
 -#[test]
 -fn test_max_dust_htlc_exposure() {
 -      do_test_max_dust_htlc_exposure(true, true, true);
 -      do_test_max_dust_htlc_exposure(false, true, true);
 -      do_test_max_dust_htlc_exposure(false, false, true);
 -      do_test_max_dust_htlc_exposure(false, false, false);
 -      do_test_max_dust_htlc_exposure(true, true, false);
 -      do_test_max_dust_htlc_exposure(true, false, false);
 -      do_test_max_dust_htlc_exposure(true, false, true);
 -      do_test_max_dust_htlc_exposure(false, true, false);
 -}
index 1319f7d2101cc1c5b861acc5c3c86e78dd72277b,7436f3c7e2044369ea97b879f1fbdb6bf613b41d..c8cedeb14cd9ac8cfaebb070733dcc130d5462ba
  
  use chain::keysinterface::SpendableOutputDescriptor;
  use ln::msgs;
 +use ln::msgs::DecodeError;
  use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
  use routing::network_graph::NetworkUpdate;
 -use util::ser::{Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
 +use util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
 +use routing::router::RouteHop;
  
  use bitcoin::blockdata::script::Script;
  
@@@ -70,60 -68,6 +70,60 @@@ pub enum PaymentPurpose 
        SpontaneousPayment(PaymentPreimage),
  }
  
 +#[derive(Clone, Debug, PartialEq)]
 +/// The reason the channel was closed. See individual variants for more details.
 +pub enum ClosureReason {
 +      /// Closure generated from receiving a peer error message.
 +      ///
 +      /// Our counterparty may have broadcast their latest commitment state, and we have
 +      /// as well.
 +      CounterpartyForceClosed {
 +              /// The error which the peer sent us.
 +              ///
 +              /// The string should be sanitized before it is used (e.g. emitted to logs
 +              /// or printed to stdout). Otherwise, a well-crafted error message may exploit
 +              /// a security vulnerability in the terminal emulator or the logging subsystem.
 +              peer_msg: String,
 +      },
 +      /// Closure generated from [`ChannelManager::force_close_channel`], called by the user.
 +      ///
 +      /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
 +      HolderForceClosed,
 +      /// The channel was closed after negotiating a cooperative close and we've now broadcasted
 +      /// the cooperative close transaction. Note the shutdown may have been initiated by us.
 +      //TODO: split between CounterpartyInitiated/LocallyInitiated
 +      CooperativeClosure,
 +      /// A commitment transaction was confirmed on chain, closing the channel. Most likely this
 +      /// commitment transaction came from our counterparty, but it may also have come from
 +      /// a copy of our own `ChannelMonitor`.
 +      CommitmentTxConfirmed,
 +      /// Closure generated from processing an event, likely an HTLC forward/relay/reception.
 +      ProcessingError {
 +              /// A developer-readable error message which we generated.
 +              err: String,
 +      },
 +      /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
 +      /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
 +      /// peer again in the future, or if the peer disconnected before we finished negotiating
 +      /// the channel open. The first case may be caused by incompatible features which our
 +      /// counterparty, or we ourselves, require.
 +      //TODO: split between PeerUnconnectable/PeerDisconnected?
 +      DisconnectedPeer,
 +      /// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
 +      /// the ChannelManager being deserialized.
 +      OutdatedChannelManager,
 +}
 +
 +impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
 +      (0, CounterpartyForceClosed) => { (1, peer_msg, required) },
 +      (2, HolderForceClosed) => {},
 +      (4, CooperativeClosure) => {},
 +      (6, CommitmentTxConfirmed) => {},
 +      (8, ProcessingError) => { (1, err, required) },
 +      (10, DisconnectedPeer) => {},
 +      (12, OutdatedChannelManager) => {},
 +);
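
As a usage sketch, a consumer might surface these reasons roughly as follows; the sanitization shown is only one possible approach to the `peer_msg` warning above, and none of this is part of the change itself:

	fn log_closure_reason(reason: &ClosureReason) {
		match reason {
			ClosureReason::CounterpartyForceClosed { peer_msg } => {
				// Strip control characters before logging, per the field docs.
				let sanitized: String = peer_msg.chars().filter(|c| !c.is_control()).collect();
				println!("Peer force-closed: {}", sanitized);
			},
			ClosureReason::HolderForceClosed => println!("We force-closed the channel"),
			ClosureReason::CooperativeClosure => println!("Cooperative close broadcast"),
			ClosureReason::CommitmentTxConfirmed => println!("Commitment transaction confirmed on-chain"),
			ClosureReason::ProcessingError { err } => println!("Closed due to error: {}", err),
			ClosureReason::DisconnectedPeer => println!("Peer disconnected before the channel was usable"),
			ClosureReason::OutdatedChannelManager => println!("ChannelManager is older than the persisted ChannelMonitor"),
		}
	}
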
 +
  /// An Event which you should probably take some action in response to.
  ///
  /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
@@@ -168,11 -112,8 +168,11 @@@ pub enum Event 
                /// payment is to pay an invoice or to send a spontaneous payment.
                purpose: PaymentPurpose,
        },
 -      /// Indicates an outbound payment we made succeeded (ie it made it all the way to its target
 +      /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
        /// and we got back the payment preimage for it).
 +      ///
 +      /// Note for MPP payments: in rare cases, this event may be preceded by a `PaymentPathFailed`
 +      /// event. In this situation, you SHOULD treat this payment as having succeeded.
        PaymentSent {
                /// The preimage to the hash given to ChannelManager::send_payment.
                /// Note that this serves as a payment receipt, if you wish to have such a thing, you must
        },
        /// Indicates an outbound payment we made failed. Probably some intermediary node dropped
        /// something. You may wish to retry with a different route.
 -      PaymentFailed {
 +      PaymentPathFailed {
                /// The hash which was given to ChannelManager::send_payment.
                payment_hash: PaymentHash,
                /// Indicates the payment was rejected for some reason by the recipient. This implies that
                /// [`NetworkGraph`]: crate::routing::network_graph::NetworkGraph
                /// [`NetGraphMsgHandler`]: crate::routing::network_graph::NetGraphMsgHandler
                network_update: Option<NetworkUpdate>,
 +              /// For both single-path and multi-path payments, this is set to true if all paths of the
 +              /// payment have failed. This will be set to false if (1) this is an MPP payment and (2)
 +              /// other parts of the larger MPP payment were still in flight when this event was generated.
 +              all_paths_failed: bool,
 +              /// The payment path that failed.
 +              path: Vec<RouteHop>,
  #[cfg(test)]
                error_code: Option<u16>,
  #[cfg(test)]
                /// transaction.
                claim_from_onchain_tx: bool,
        },
 +      /// Used to indicate that a channel with the given `channel_id` is in the process of closure.
 +      ChannelClosed {
 +              /// The channel_id of the channel which has been closed. Note that on-chain transactions
 +              /// resolving the channel are likely still awaiting confirmation.
 +              channel_id: [u8; 32],
 +              /// The reason the channel was closed.
 +              reason: ClosureReason
 +      }
  }
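
Taken together with the MPP note on `PaymentSent` and the new `all_paths_failed`/`path` fields, retry handling might look roughly like the sketch below; `record_success` and `retry_payment` are hypothetical helpers, not LDK APIs:

	match event {
		Event::PaymentSent { payment_preimage } => {
			// Even if a PaymentPathFailed for the same payment arrived first, the
			// payment as a whole has succeeded (see the MPP note above).
			record_success(payment_preimage);
		},
		Event::PaymentPathFailed { payment_hash, rejected_by_dest, all_paths_failed, path, .. } => {
			if rejected_by_dest {
				// The recipient saw the payment and rejected it; do not retry.
			} else if all_paths_failed {
				// No parts remain in flight; the payment may be retried.
				retry_payment(payment_hash);
			} else {
				// Only this `path` failed; other MPP parts are still pending.
				let _failed_hops = path;
			}
		},
		Event::ChannelClosed { channel_id, reason } => {
			println!("Channel {:?} closed: {:?}", channel_id, reason);
		},
		_ => {},
	}
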
  
  impl Writeable for Event {
                                        (0, payment_preimage, required),
                                });
                        },
 -                      &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update,
 +                      &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update,
 +                                                  ref all_paths_failed, ref path,
                                #[cfg(test)]
                                ref error_code,
                                #[cfg(test)]
                                        (0, payment_hash, required),
                                        (1, network_update, option),
                                        (2, rejected_by_dest, required),
 +                                      (3, all_paths_failed, required),
 +                                      (5, path, vec_type),
                                });
                        },
                        &Event::PendingHTLCsForwardable { time_forwardable: _ } => {
                                4u8.write(writer)?;
-                               write_tlv_fields!(writer, {});
-                               // We don't write the time_fordwardable out at all, as we presume when the user
-                               // deserializes us at least that much time has elapsed.
+                               // Note that we now ignore these on the read end as we'll re-generate them in
+                               // ChannelManager; we write them here only for backwards compatibility.
                        },
                        &Event::SpendableOutputs { ref outputs } => {
                                5u8.write(writer)?;
                                        (2, claim_from_onchain_tx, required),
                                });
                        },
 +                      &Event::ChannelClosed { ref channel_id, ref reason } => {
 +                              9u8.write(writer)?;
 +                              write_tlv_fields!(writer, {
 +                                      (0, channel_id, required),
 +                                      (2, reason, required)
 +                              });
 +                      },
 +                      // Note that, going forward, all new events must write their data solely inside of
 +                      // `write_tlv_fields`. Versions 0.0.101+ will ignore unknown odd-numbered events as
 +                      // long as their data is written via `write_tlv_fields`.
                }
                Ok(())
        }
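
The framing that makes the odd-type skip in the reader below possible is the BigSize length prefix `write_tlv_fields` emits. A hypothetical future event of odd type 11 (the type number and field name are illustrative only) would follow the same pattern:

	11u8.write(writer)?;
	write_tlv_fields!(writer, {
		// Everything after the type byte sits inside a length-prefixed blob,
		// so readers that do not know type 11 can skip it wholesale.
		(0, some_new_field, required),
	});
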
  impl MaybeReadable for Event {
        fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
                match Readable::read(reader)? {
 +                      // Note that we do not write a length-prefixed TLV for FundingGenerationReady events,
 +                      // unlike all other events; thus we return immediately here.
                        0u8 => Ok(None),
                        1u8 => {
                                let f = || {
                                        let mut payment_hash = PaymentHash([0; 32]);
                                        let mut rejected_by_dest = false;
                                        let mut network_update = None;
 +                                      let mut all_paths_failed = Some(true);
 +                                      let mut path: Option<Vec<RouteHop>> = Some(vec![]);
                                        read_tlv_fields!(reader, {
                                                (0, payment_hash, required),
                                                (1, network_update, ignorable),
                                                (2, rejected_by_dest, required),
 +                                              (3, all_paths_failed, option),
 +                                              (5, path, vec_type),
                                        });
 -                                      Ok(Some(Event::PaymentFailed {
 +                                      Ok(Some(Event::PaymentPathFailed {
                                                payment_hash,
                                                rejected_by_dest,
                                                network_update,
 +                                              all_paths_failed: all_paths_failed.unwrap(),
 +                                              path: path.unwrap(),
                                                #[cfg(test)]
                                                error_code,
                                                #[cfg(test)]
                                };
                                f()
                        },
-                       4u8 => {
-                               let f = || {
-                                       read_tlv_fields!(reader, {});
-                                       Ok(Some(Event::PendingHTLCsForwardable {
-                                               time_forwardable: Duration::from_secs(0)
-                                       }))
-                               };
-                               f()
-                       },
+                       4u8 => Ok(None),
                        5u8 => {
                                let f = || {
                                        let mut outputs = VecReadWrapper(Vec::new());
                                };
                                f()
                        },
 +                      9u8 => {
 +                              let mut channel_id = [0; 32];
 +                              let mut reason = None;
 +                              read_tlv_fields!(reader, {
 +                                      (0, channel_id, required),
 +                                      (2, reason, ignorable),
 +                              });
 +                              if reason.is_none() { return Ok(None); }
 +                              Ok(Some(Event::ChannelClosed { channel_id, reason: reason.unwrap() }))
 +                      },
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
 -                      x if x % 2 == 1 => Ok(None),
 +                      // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
 +                      // reads.
 +                      x if x % 2 == 1 => {
 +                              // If the event is of unknown type, assume it was written with `write_tlv_fields`,
 +                              // which prefixes the whole thing with a BigSize length. Because the event is an
 +                              // unknown odd type, we should treat it as `Ok(None)` even if some of its TLV
 +                              // fields are even-numbered. Thus, we avoid using `read_tlv_fields` and simply read
 +                              // exactly the number of bytes specified, ignoring them entirely.
 +                              let tlv_len: BigSize = Readable::read(reader)?;
 +                              FixedLengthReader::new(reader, tlv_len.0)
 +                                      .eat_remaining().map_err(|_| msgs::DecodeError::ShortRead)?;
 +                              Ok(None)
 +                      },
                        _ => Err(msgs::DecodeError::InvalidValue)
                }
        }