Merge pull request #1065 from TheBlueMatt/2021-08-bump-dust
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 27 Sep 2021 20:39:02 +0000 (20:39 +0000)
committer GitHub <noreply@github.com>
Mon, 27 Sep 2021 20:39:02 +0000 (20:39 +0000)
Increase our default/minimum dust limit and decrease our max

lightning/src/ln/channel.rs
lightning/src/ln/functional_tests.rs

index 858e307b77e429a6c992c0759bcdf751148ac251,f412bf2f6e3f0f08dac556d52387bef94052f8b7..11995c50d7fa0e1314fe3edcfadaca4993b7ec10
@@@ -26,7 -26,7 +26,7 @@@ use ln::{PaymentPreimage, PaymentHash}
  use ln::features::{ChannelFeatures, InitFeatures};
  use ln::msgs;
  use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
- use ln::script::ShutdownScript;
+ use ln::script::{self, ShutdownScript};
  use ln::channelmanager::{CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
  use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
  use ln::chan_utils;
@@@ -44,7 -44,6 +44,6 @@@ use util::scid_utils::scid_from_parts
  use io;
  use prelude::*;
  use core::{cmp,mem,fmt};
- use core::convert::TryFrom;
  use core::ops::Deref;
  #[cfg(any(test, feature = "fuzztarget", debug_assertions))]
  use sync::Mutex;
@@@ -555,21 -554,24 +554,24 @@@ pub const ANCHOR_OUTPUT_VALUE_SATOSHI: 
  /// it's 2^24.
  pub const MAX_FUNDING_SATOSHIS: u64 = 1 << 24;
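
For orientation, a quick sanity check of the cap above: BOLT 2 (pre-wumbo) requires `funding_satoshis` to stay strictly below 2^24 sats, i.e. at most 16,777,215 sats (about 0.168 BTC).

```rust
fn main() {
    const MAX_FUNDING_SATOSHIS: u64 = 1 << 24;
    // 2^24 sats, the pre-wumbo BOLT 2 funding cap; channels must be smaller.
    assert_eq!(MAX_FUNDING_SATOSHIS, 16_777_216);
    assert!(16_777_215 < MAX_FUNDING_SATOSHIS);
}
```
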
  
- /// Maximum counterparty `dust_limit_satoshis` allowed. 2 * standard dust threshold on p2wsh output
- /// Scales up on Bitcoin Core's proceeding policy with dust outputs. A typical p2wsh output is 43
- /// bytes to which Core's `GetDustThreshold()` sums up a minimal spend of 67 bytes (even if
- /// a p2wsh witnessScript might be *effectively* smaller), `dustRelayFee` is set to 3000sat/kb, thus
- /// 110 * 3000 / 1000 = 330. Per-protocol rules, all time-sensitive outputs are p2wsh, a value of
- /// 330 sats is the lower bound desired to ensure good propagation of transactions. We give a bit
- /// of margin to our counterparty and pick up 660 satoshis as an accepted `dust_limit_satoshis`
- /// upper bound to avoid negotiation conflicts with other implementations.
- pub const MAX_DUST_LIMIT_SATOSHIS: u64 = 2 * 330;
- /// A typical p2wsh output is 43 bytes to which Core's `GetDustThreshold()` sums up a minimal
- /// spend of 67 bytes (even if a p2wsh witnessScript might be *effectively* smaller), `dustRelayFee`
- /// is set to 3000sat/kb, thus 110 * 3000 / 1000 = 330. Per-protocol rules, all time-sensitive outputs
- /// are p2wsh, a value of 330 sats is the lower bound desired to ensure good propagation of transactions.
- pub const MIN_DUST_LIMIT_SATOSHIS: u64 = 330;
+ /// The maximum network dust limit for standard script formats. This currently represents the
+ /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
+ /// transaction non-standard and thus refuses to relay it.
+ /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
+ /// implementations use this value for their dust limit today.
+ pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
+ /// The maximum channel dust limit we will accept from our counterparty.
+ pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
+ /// The dust limit is used for both the commitment transaction outputs as well as the closing
+ /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
+ /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
+ /// In order to avoid having to concern ourselves with standardness during the closing process, we
+ /// simply require our counterparty to use a dust limit which will leave any segwit output
+ /// standard.
+ /// See https://github.com/lightningnetwork/lightning-rfc/issues/905 for more details.
+ pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
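
Both new bounds fall out of Bitcoin Core's `GetDustThreshold()` arithmetic at the default `dustRelayFee` of 3000 sat/kvB. The sketch below (illustrative helper functions, not LDK or Core API) reproduces 546 (P2PKH, the classic dust floor many implementations use), 354 (the largest allowed 42-byte segwit script, per the lightning-rfc issue linked above), and the old 330 (a 34-byte P2WSH script).

```rust
// Illustrative only: mirrors Bitcoin Core's GetDustThreshold() at the
// default dustRelayFee of 3000 sat/kvB.
const DUST_RELAY_FEE_SAT_PER_KVB: u64 = 3000;

/// Output spent by a non-segwit input: serialized output size plus a minimal
/// 148-byte spend (32 txid + 4 index + 1 len + 107 scriptSig + 4 sequence).
fn non_segwit_dust(script_len: u64) -> u64 {
    let output_size = 8 /* value */ + 1 /* script len */ + script_len;
    (output_size + 148) * DUST_RELAY_FEE_SAT_PER_KVB / 1000
}

/// Segwit output: Core discounts the spend to 32 + 4 + 1 + 107/4 + 4 = 67 bytes.
fn segwit_dust(script_len: u64) -> u64 {
    (8 + 1 + script_len + 67) * DUST_RELAY_FEE_SAT_PER_KVB / 1000
}

fn main() {
    assert_eq!(non_segwit_dust(25), 546); // P2PKH: (34 + 148) * 3
    assert_eq!(segwit_dust(42), 354);     // max segwit script: (51 + 67) * 3
    assert_eq!(segwit_dust(34), 330);     // P2WSH: (43 + 67) * 3
}
```
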
  
  /// Used to return a simple Error back to ChannelManager. Will get converted to a
  /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
@@@ -636,7 -638,7 +638,7 @@@ impl<Signer: Sign> Channel<Signer> 
                        return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
                }
                let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis);
-               if holder_selected_channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS {
+               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
                }
  
  
                        feerate_per_kw: feerate,
                        counterparty_dust_limit_satoshis: 0,
-                       holder_dust_limit_satoshis: MIN_DUST_LIMIT_SATOSHIS,
+                       holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                        counterparty_max_htlc_value_in_flight_msat: 0,
                        counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
                        counterparty_htlc_minimum_msat: 0,
                if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
                }
-               if msg.dust_limit_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
-               if msg.dust_limit_satoshis > MAX_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
                }
  
                // Convert things into internal flags and prep our state:
                let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
  
                let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
-               if holder_selected_channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
-               if msg.channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is smaller than our dust limit ({})", msg.channel_reserve_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is smaller than our dust limit ({})", msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
                        return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
                                        if script.len() == 0 {
                                                None
                                        } else {
-                                               match ShutdownScript::try_from((script.clone(), their_features)) {
-                                                       Ok(shutdown_script) => Some(shutdown_script.into_inner()),
-                                                       Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))),
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
                                                }
+                                               Some(script.clone())
                                        }
                                },
                                // Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (i.e. a 0-length script). Peer looks buggy; we fail the channel
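
`script::is_bolt2_compliant` replaces the `ShutdownScript::try_from` round-trip with a plain predicate. A minimal sketch of the BOLT 2 rule it enforces, assuming rust-bitcoin's `Script` predicates (the real helper lives in `ln::script` and inspects the peer's `InitFeatures` rather than a bool):

```rust
use bitcoin::blockdata::script::Script;

// Sketch only: BOLT 2 permits P2PKH, P2SH, P2WPKH and P2WSH shutdown
// scripts, plus any future segwit program (OP_1..OP_16 followed by a
// single 2-to-40 byte push) when the peer set option_shutdown_anysegwit.
fn is_bolt2_compliant_sketch(script: &Script, shutdown_anysegwit: bool) -> bool {
    if script.is_p2pkh() || script.is_p2sh()
        || script.is_v0_p2wpkh() || script.is_v0_p2wsh() {
        return true;
    }
    if shutdown_anysegwit {
        // is_witness_program() checks the version-byte + single-push shape;
        // 0x51..=0x60 are OP_1 through OP_16.
        let b = script.as_bytes();
        return script.is_witness_program() && (0x51..=0x60).contains(&b[0]);
    }
    false
}
```
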
                        feerate_per_kw: msg.feerate_per_kw,
                        channel_value_satoshis: msg.funding_satoshis,
                        counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
-                       holder_dust_limit_satoshis: MIN_DUST_LIMIT_SATOSHIS,
+                       holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                        counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
                        counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
                        counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
                if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
                }
-               if msg.dust_limit_satoshis < MIN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
-               if msg.dust_limit_satoshis > MAX_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_DUST_LIMIT_SATOSHIS)));
+               if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth {
                        return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", config.peer_channel_config_limits.max_minimum_depth, msg.minimum_depth)));
                                        if script.len() == 0 {
                                                None
                                        } else {
-                                               match ShutdownScript::try_from((script.clone(), their_features)) {
-                                                       Ok(shutdown_script) => Some(shutdown_script.into_inner()),
-                                                       Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))),
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
                                                }
+                                               Some(script.clone())
                                        }
                                },
                                // Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (i.e. a 0-length script). Peer looks buggy; we fail the channel
                }
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
  
-               let shutdown_scriptpubkey = match ShutdownScript::try_from((msg.scriptpubkey.clone(), their_features)) {
-                       Ok(script) => script.into_inner(),
-                       Err(_) => return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))),
-               };
+               if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
+                       return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
+               }
  
                if self.counterparty_shutdown_scriptpubkey.is_some() {
-                       if Some(&shutdown_scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() {
-                               return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", shutdown_scriptpubkey.to_bytes().to_hex())));
+                       if Some(&msg.scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() {
+                               return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
                        }
                } else {
-                       self.counterparty_shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+                       self.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
                }
  
                // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
                        },
                };
  
+               for outp in closing_tx.trust().built_transaction().output.iter() {
+                       if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
+                               return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+                       }
+               }
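
The new loop above guards the cooperative-close path: `shutdown` scripts may be legacy P2PKH/P2SH, and a counterparty whose balance falls below the 546-sat standardness floor would otherwise hand us a closing transaction Bitcoin Core refuses to relay. A sketch of the predicate being applied, using rust-bitcoin types (illustrative, not the diff's code):

```rust
use bitcoin::blockdata::transaction::TxOut;

// An output is fine if it is a segwit program (MIN_CHAN_DUST_LIMIT_SATOSHIS
// already keeps those standard) or if it clears the 546-sat floor
// (MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS).
fn closing_output_ok(outp: &TxOut) -> bool {
    outp.script_pubkey.is_witness_program() || outp.value >= 546
}
```
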
                assert!(self.shutdown_scriptpubkey.is_some());
                if let Some((last_fee, sig)) = self.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
@@@ -5509,7 -5516,7 +5516,7 @@@ mod tests 
        use bitcoin::hashes::hex::FromHex;
        use hex;
        use ln::{PaymentPreimage, PaymentHash};
 -      use ln::channelmanager::HTLCSource;
 +      use ln::channelmanager::{HTLCSource, MppId};
        use ln::channel::{Channel,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,HTLCCandidate,HTLCInitiator,TxCreationKeys};
        use ln::channel::MAX_FUNDING_SATOSHIS;
        use ln::features::InitFeatures;
                                path: Vec::new(),
                                session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
                                first_hop_htlc_msat: 548,
 +                              mpp_id: MppId([42; 32]),
                        }
                });
  
index 03b62ac185f44052246c40e341d0b29fc7918e0e,e6529415f2a5ed2226afadf87caa6ca76543738c..3be75abbb0a1ff03be9de394b897cdb023a9fc9b
@@@ -19,7 -19,7 +19,7 @@@ use chain::transaction::OutPoint
  use chain::keysinterface::BaseSign;
  use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
  use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 -use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
 +use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MppId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
  use ln::channel::{Channel, ChannelError};
  use ln::{chan_utils, onion_utils};
  use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
@@@ -30,7 -30,7 +30,7 @@@ use ln::msgs
  use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
  use util::enforcing_trait_impls::EnforcingSigner;
  use util::{byte_utils, test_utils};
 -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
  use util::errors::APIError;
  use util::ser::{Writeable, ReadableArgs};
  use util::config::UserConfig;
@@@ -638,7 -638,6 +638,7 @@@ fn test_update_fee_that_funder_cannot_a
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
  }
  
  #[test]
@@@ -739,8 -738,6 +739,8 @@@ fn test_update_fee_with_fundee_update_a
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
  }
  
  #[test]
@@@ -853,8 -850,6 +853,8 @@@ fn test_update_fee() 
        assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
  }
  
  #[test]
@@@ -982,20 -977,10 +982,20 @@@ fn fake_network_test() 
  
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
  }
  
  #[test]
@@@ -1191,7 -1176,6 +1191,7 @@@ fn test_duplicate_htlc_different_direct
  
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
  
        // Check we only broadcast 1 timeout tx
@@@ -1473,7 -1457,6 +1473,7 @@@ fn test_chan_reserve_violation_inbound_
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
  }
  
  #[test]
@@@ -1497,7 -1480,7 +1497,7 @@@ fn test_chan_reserve_dust_inbound_htlcs
        push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
  
-       let dust_amt = crate::ln::channel::MIN_DUST_LIMIT_SATOSHIS * 1000
+       let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
                + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1;
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
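
To unpack the arithmetic above: an inbound HTLC is dust when its value, minus the fee of the HTLC-success transaction that would claim it, falls below the holder's dust limit, so the largest still-dust HTLC in msat is `dust_limit * 1000 + fee * 1000 - 1`. A worked example with assumed numbers (`HTLC_SUCCESS_TX_WEIGHT` = 703 and the 253 sat/kW feerate common in these tests):

```rust
fn main() {
    let dust_limit_sat: u64 = 354;      // MIN_CHAN_DUST_LIMIT_SATOSHIS
    let htlc_success_weight: u64 = 703; // HTLC_SUCCESS_TX_WEIGHT (assumed)
    let feerate_per_kw: u64 = 253;      // assumed test feerate

    // Fee to claim the HTLC, rounded down to whole sats, expressed in msat.
    let htlc_fee_msat = feerate_per_kw * htlc_success_weight / 1000 * 1000;
    assert_eq!(htlc_fee_msat, 177_000);

    // Largest HTLC (msat) that still counts as dust on our commitment tx.
    let dust_amt_msat = dust_limit_sat * 1000 + htlc_fee_msat - 1;
    assert_eq!(dust_amt_msat, 530_999);
}
```
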
@@@ -1600,7 -1583,6 +1600,7 @@@ fn test_chan_reserve_violation_inbound_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
  }
  
  #[test]
@@@ -2057,8 -2039,6 +2057,8 @@@ fn channel_monitor_network_test() 
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 +      check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
  
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
  
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr) => {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
  
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
        assert_eq!(nodes[4].node.list_channels().len(), 0);
  
        nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
 +      check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 +      check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  #[test]
@@@ -2247,7 -2221,6 +2247,7 @@@ fn test_justice_tx() 
                        node_txn.truncate(1);
                }
                check_added_monitors!(nodes[1], 1);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
  
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
  
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@@ -2329,7 -2299,6 +2329,7 @@@ fn revoked_output_claim() 
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
  
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
 -      check_added_monitors!(nodes[0], 1)
 +      check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  #[test]
@@@ -2377,10 -2345,8 +2377,10 @@@ fn claim_htlc_outputs_shared_tx() 
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
  
@@@ -2437,13 -2403,7 +2437,13 @@@ fn claim_htlc_outputs_single_tx() 
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
 -              expect_pending_htlcs_forwardable_ignore!(nodes[0]);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 +              let mut events = nodes[0].node.get_and_clear_pending_events();
 +              expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
 +              match events[1] {
 +                      Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +                      _ => panic!("Unexpected event"),
 +              }
  
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
@@@ -2541,7 -2501,6 +2541,7 @@@ fn test_htlc_on_chain_success() 
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 5);
        assert_eq!(node_txn[0], node_txn[3]);
                added_monitors.clear();
        }
        let forwarded_events = nodes[1].node.get_and_clear_pending_events();
 -      assert_eq!(forwarded_events.len(), 2);
 -      if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
 -              } else { panic!(); }
 +      assert_eq!(forwarded_events.len(), 3);
 +      match forwarded_events[0] {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
        if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
                } else { panic!(); }
 +      if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] {
 +              } else { panic!(); }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Sucess * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
        let commitment_spend =
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
 -      assert_eq!(events.len(), 2);
 +      assert_eq!(events.len(), 3);
        let mut first_claimed = false;
        for event in events {
                match event {
                                        assert_eq!(payment_preimage, our_payment_preimage_2);
                                }
                        },
 +                      Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
                        _ => panic!("Unexpected event"),
                }
        }
@@@ -2747,7 -2700,6 +2747,7 @@@ fn do_test_htlc_on_chain_timeout(connec
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan_2.3);
        // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        mine_transaction(&nodes[1], &commitment_tx[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let timeout_tx;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
  
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], chan_1.3);
@@@ -2864,7 -2814,6 +2864,7 @@@ fn test_simple_commitment_revoked_fail_
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
  
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
@@@ -3014,19 -2963,15 +3014,19 @@@ fn do_test_commitment_revoked_fail_back
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let events = nodes[1].node.get_and_clear_pending_events();
 -      assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
 +      assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
        match events[0] {
 -              Event::PaymentFailed { ref payment_hash, .. } => {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              Event::PaymentPathFailed { ref payment_hash, .. } => {
                        assert_eq!(*payment_hash, fourth_payment_hash);
                },
                _ => panic!("Unexpected event"),
        }
        if !deliver_bs_raa {
 -              match events[1] {
 +              match events[2] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 3);
                        match events[0] {
 -                              Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
 +                              Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        // If we delivered B's RAA we got an unknown preimage error, not something
                                        // that we should update our routing table for.
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
 -                              Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
 +                              Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[2] {
 -                              Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
 +                              Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
@@@ -3186,21 -3131,9 +3186,21 @@@ fn fail_backward_pending_htlc_upon_chan
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
 -
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 2);
        // Check that Alice fails backward the pending HTLC from the second payment.
 -      expect_payment_failed!(nodes[0], failed_payment_hash, true);
 +      match events[0] {
 +              Event::PaymentPathFailed { payment_hash, .. } => {
 +                      assert_eq!(payment_hash, failed_payment_hash);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
 +                      assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
 +              },
 +              _ => panic!("Unexpected event {:?}", events[1]),
 +      }
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  }
@@@ -3220,7 -3153,6 +3220,7 @@@ fn test_htlc_ignore_latest_remote_commi
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3);
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@@ -3285,7 -3216,6 +3285,7 @@@ fn test_force_close_fail_back() 
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@@ -3379,7 -3308,7 +3379,7 @@@ fn test_simple_peer_disconnect() 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
  
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
 -      fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
 +      fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
  
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
        {
                        _ => panic!("Unexpected event"),
                }
                match events[1] {
 -                      Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
 +                      Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                                assert!(rejected_by_dest);
                        },
@@@ -3957,8 -3886,7 +3957,8 @@@ fn do_test_htlc_timeout(send_partial_mp
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
 -              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, &None).unwrap();
 +              let mpp_id = MppId([42; 32]);
 +              nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, mpp_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
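
`send_payment_along_path` now threads an `mpp_id` through so every partial HTLC of one payment carries the same identifier. Inferred from the `MppId([42; 32])` usages in this diff, the type is a 32-byte tuple struct; a minimal sketch (the real definition lives in `ln::channelmanager`):

```rust
// Sketch of the identifier threaded through the diff above; not the
// actual LDK definition.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct MppId(pub [u8; 32]);

fn main() {
    // Every path of one MPP payment is sent with the same id so failures
    // and successes can be correlated back to the payment they belong to.
    let mpp_id = MppId([42; 32]);
    assert_eq!(mpp_id, MppId([42; 32]));
}
```
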
@@@ -4155,34 -4083,6 +4155,34 @@@ fn test_no_txn_manager_serialize_deseri
        send_payment(&nodes[0], &[&nodes[1]], 1000000);
  }
  
 +#[test]
 +fn mpp_failure() {
 +      let chanmon_cfgs = create_chanmon_cfgs(4);
 +      let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
 +      let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 +      let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 +
 +      let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 +      let logger = test_utils::TestLogger::new();
 +
 +      let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
 +      let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
 +      let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
 +      let path = route.paths[0].clone();
 +      route.paths.push(path);
 +      route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
 +      route.paths[0][0].short_channel_id = chan_1_id;
 +      route.paths[0][1].short_channel_id = chan_3_id;
 +      route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
 +      route.paths[1][0].short_channel_id = chan_2_id;
 +      route.paths[1][1].short_channel_id = chan_4_id;
 +      send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
 +      fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
 +}
 +
  #[test]
  fn test_dup_htlc_onchain_fails_on_reload() {
        // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
        //
        // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
        // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
 -      // PaymentFailed event appearing). However, because we may not serialize the relevant
 +      // PaymentPathFailed event appearing). However, because we may not serialize the relevant
        // ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
        // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
        // and de-duplicates ChannelMonitor events.
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
  
        header.prev_blockhash = nodes[0].best_block_hash();
@@@ -4577,7 -4475,6 +4577,7 @@@ fn test_manager_serialize_deserialize_i
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
 +      check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
  
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@@ -4641,7 -4538,6 +4641,7 @@@ fn test_claim_sizeable_push_msat() 
        nodes[1].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@@ -4670,7 -4566,6 +4670,7 @@@ fn test_claim_on_remote_sizeable_push_m
        nodes[0].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@@ -4708,7 -4602,6 +4708,7 @@@ fn test_claim_on_remote_revoked_sizeabl
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        mine_transaction(&nodes[1], &node_txn[0]);
@@@ -4761,7 -4654,6 +4761,7 @@@ fn test_static_spendable_outputs_preima
        check_spends!(node_txn[2], node_txn[1]);
  
        mine_transaction(&nodes[1], &node_txn[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@@ -4806,7 -4698,6 +4806,7 @@@ fn test_static_spendable_outputs_timeou
        assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
  
        mine_transaction(&nodes[1], &node_txn[1]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[1], our_payment_hash, true);
  
@@@ -4837,7 -4728,6 +4837,7 @@@ fn test_static_spendable_outputs_justic
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@@ -4874,7 -4764,6 +4874,7 @@@ fn test_static_spendable_outputs_justic
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
  
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
@@@ -4947,7 -4835,6 +4947,7 @@@ fn test_static_spendable_outputs_justic
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
  
        assert_eq!(revoked_htlc_txn.len(), 2);
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
@@@ -5045,7 -4931,6 +5045,7 @@@ fn test_onchain_to_onchain_claim() 
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
  
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 3);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
        check_added_monitors!(nodes[1], 1);
 -      expect_payment_forwarded!(nodes[1], Some(1000), true);
 +      let events = nodes[1].node.get_and_clear_pending_events();
 +      assert_eq!(events.len(), 2);
 +      match events[0] {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
 +      match events[1] {
 +              Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
 +                      assert_eq!(fee_earned_msat, Some(1000));
 +                      assert_eq!(claim_from_onchain_tx, true);
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
        {
                let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // ChannelMonitor: claim tx
                check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
                b_txn.clear();
        }
 +      check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
 -      check_added_monitors!(nodes[1], 1);
        match msg_events[0] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
        assert_eq!(b_txn.len(), 3);
@@@ -5164,7 -5036,6 +5164,7 @@@ fn test_duplicate_payment_hash_one_fail
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
  
        let htlc_timeout_tx;
        nodes[2].node.claim_funds(our_payment_preimage);
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
 +      check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@@ -5279,7 -5149,6 +5279,7 @@@ fn test_dynamic_spendable_outputs_local
        check_added_monitors!(nodes[1], 1);
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@@ -5450,26 -5319,9 +5450,26 @@@ fn do_test_fail_backwards_unrevoked_rem
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
        }
 +      let events = nodes[2].node.get_and_clear_pending_events();
 +      let close_event = if deliver_last_raa {
 +              assert_eq!(events.len(), 2);
 +              events[1].clone()
 +      } else {
 +              assert_eq!(events.len(), 1);
 +              events[0].clone()
 +      };
 +      match close_event {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
 +
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
 -      expect_pending_htlcs_forwardable!(nodes[2]);
 +      if deliver_last_raa {
 +              expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
 +      } else {
 +              expect_pending_htlcs_forwardable!(nodes[2]);
 +      }
        check_added_monitors!(nodes[2], 3);
  
        let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
        let mut as_failds = HashSet::new();
        let mut as_updates = 0;
        for event in as_events.iter() {
 -              if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
 +              if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(as_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_2 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
        let mut bs_failds = HashSet::new();
        let mut bs_updates = 0;
        for event in bs_events.iter() {
 -              if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
 +              if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(bs_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
@@@ -5606,7 -5458,6 +5606,7 @@@ fn test_dynamic_spendable_outputs_local
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
  
        let htlc_timeout = {
@@@ -5690,7 -5541,6 +5690,7 @@@ fn test_key_derivation_params() 
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -5731,7 -5581,6 +5731,7 @@@ fn test_static_output_closing_tx() 
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
  
        mine_transaction(&nodes[0], &closing_tx);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        check_spends!(spend_txn[0], closing_tx);
  
        mine_transaction(&nodes[1], &closing_tx);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@@ -5790,7 -5638,6 +5790,7 @@@ fn do_htlc_claim_local_commitment_only(
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  }
  
  fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@@ -5985,7 -5830,7 +5985,7 @@@ fn bolt2_open_channel_sane_dust_limit(
        let push_msat=10001;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
        let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       node0_to_1_send_open_channel.dust_limit_satoshis = 661;
+       node0_to_1_send_open_channel.dust_limit_satoshis = 547;
        node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
  
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
                },
                _ => panic!("Unexpected event"),
        };
-       assert_eq!(err_msg.data, "dust_limit_satoshis (661) is greater than the implementation limit (660)");
+       assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
  }
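
For orientation, the bound this test drives is a simple upper-limit check on the counterparty's proposed `dust_limit_satoshis` when handling `open_channel`. The sketch below is illustrative only: the constant and function names are assumptions rather than LDK's internals, though the 546-sat cap and the error text mirror the assertion above.

const IMPLEMENTATION_DUST_LIMIT_CAP_SATOSHIS: u64 = 546;

fn check_counterparty_dust_limit(dust_limit_satoshis: u64) -> Result<(), String> {
	if dust_limit_satoshis > IMPLEMENTATION_DUST_LIMIT_CAP_SATOSHIS {
		// Mirrors the error message asserted in bolt2_open_channel_sane_dust_limit.
		return Err(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})",
			dust_limit_satoshis, IMPLEMENTATION_DUST_LIMIT_CAP_SATOSHIS));
	}
	Ok(())
}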
  
  // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
@@@ -6068,10 -5913,9 +6068,10 @@@ fn test_fail_holding_cell_htlc_upon_fre
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
 -              &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => {
 +              &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(our_payment_hash.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
 +                      assert_eq!(*all_paths_failed, true);
                        assert_eq!(*network_update, None);
                        assert_eq!(*error_code, None);
                        assert_eq!(*error_data, None);
@@@ -6155,10 -5999,9 +6155,10 @@@ fn test_free_and_fail_holding_cell_htlc
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
 -              &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => {
 +              &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(payment_hash_2.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
 +                      assert_eq!(*all_paths_failed, true);
                        assert_eq!(*network_update, None);
                        assert_eq!(*error_code, None);
                        assert_eq!(*error_data, None);
@@@ -6412,7 -6255,6 +6412,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
  }
  
  #[test]
@@@ -6540,7 -6382,6 +6540,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6577,7 -6418,6 +6577,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6622,7 -6462,6 +6622,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6648,7 -6487,6 +6648,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6674,7 -6512,6 +6674,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6724,7 -6561,6 +6724,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6758,7 -6594,6 +6758,7 @@@ fn test_update_fulfill_htlc_bolt2_updat
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6792,7 -6627,6 +6792,7 @@@ fn test_update_fulfill_htlc_bolt2_updat
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6826,7 -6660,6 +6826,7 @@@ fn test_update_fulfill_htlc_bolt2_updat
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6868,7 -6701,6 +6868,7 @@@ fn test_update_fulfill_htlc_bolt2_incor
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6910,7 -6742,6 +6910,7 @@@ fn test_update_fulfill_htlc_bolt2_wrong
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -6959,7 -6790,6 +6959,7 @@@ fn test_update_fulfill_htlc_bolt2_missi
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
  }
  
  #[test]
@@@ -7102,17 -6932,16 +7102,17 @@@ fn do_test_failure_delay_dust_htlc_loca
  
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
  
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        let events = nodes[0].node.get_and_clear_pending_events();
 -      // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx
 +      // Only 2 PaymentPathFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
        assert_eq!(events.len(), 2);
        let mut first_failed = false;
        for event in events {
                match event {
 -                      Event::PaymentFailed { payment_hash, .. } => {
 +                      Event::PaymentPathFailed { payment_hash, .. } => {
                                if payment_hash == payment_hash_1 {
                                        assert!(!first_failed);
                                        first_failed = true;
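
The comment above relies on the BOLT 3 trimming rule: an HTLC whose value does not clear the dust limit plus the fee of its claim transaction gets no output on the commitment transaction, so it can be failed as soon as the commitment confirms (after ANTI_REORG_DELAY), while an over-dust HTLC must wait for a confirmed HTLC-timeout transaction. A rough sketch, with illustrative parameter names rather than LDK's internal code:

// `feerate_per_kw` is in satoshis per 1000 weight units, as used on commitment txs.
fn is_trimmed_to_dust(offered_amount_sat: u64, dust_limit_sat: u64,
		feerate_per_kw: u64, htlc_timeout_tx_weight: u64) -> bool {
	// An offered HTLC only gets its own commitment output (and thus needs an
	// HTLC-timeout tx to fail on-chain) if it clears the dust limit plus the
	// cost of the transaction that would claim it.
	let htlc_tx_fee_sat = feerate_per_kw * htlc_timeout_tx_weight / 1000;
	offered_amount_sat < dust_limit_sat + htlc_tx_fee_sat
}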
@@@ -7163,7 -6992,6 +7163,7 @@@ fn do_test_sweep_outbound_htlc_failure_
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[0], dust_hash, true);
  
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
                        assert_eq!(events.len(), 2);
                        let first;
                        match events[0] {
 -                              Event::PaymentFailed { payment_hash, .. } => {
 +                              Event::PaymentPathFailed { payment_hash, .. } => {
                                        if payment_hash == dust_hash { first = true; }
                                        else { first = false; }
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
 -                              Event::PaymentFailed { payment_hash, .. } => {
 +                              Event::PaymentPathFailed { payment_hash, .. } => {
                                        if first { assert_eq!(payment_hash, non_dust_hash); }
                                        else { assert_eq!(payment_hash, dust_hash); }
                                },
@@@ -7265,17 -7092,14 +7265,17 @@@ fn test_user_configurable_csv_delay() 
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        accept_channel.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
 +      let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
                match action {
                        &ErrorAction::SendErrorMessage { ref msg } => {
                                assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
 +                              reason_msg = msg.data.clone();
                        },
 -                      _ => { assert!(false); }
 +                      _ => { panic!(); }
                }
 -      } else { assert!(false); }
 +      } else { panic!(); }
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
  
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
@@@ -7388,10 -7212,10 +7388,10 @@@ fn test_data_loss_protect() 
  
        // Check we close channel detecting A is fallen-behind
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
        check_added_monitors!(nodes[1], 1);
  
 -
        // Check A is able to claim to_remote output
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
        assert_eq!(node_txn[0].output.len(), 2);
        mine_transaction(&nodes[0], &node_txn[0]);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
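
In outline, the data-loss-protect flow this test drives: when a reestablish message proves the counterparty has seen a newer channel state than we remember, our persisted state is stale, and broadcasting our commitment would hand the peer our funds via the revocation path. A minimal sketch, assuming a caller that has already evaluated the reestablish fields; the function and flag names are assumptions, and the error string mirrors the one this test asserts:

fn guard_stale_broadcast(peer_proved_newer_state: bool) -> Result<(), &'static str> {
	if peer_proved_newer_state {
		// The fallen-behind node must never broadcast its stale commitment:
		// the counterparty holds the revocation secret for it.
		return Err("We have fallen behind - we have received proof that if we broadcast \
			remote is going to claim our funds - we can't do any automated broadcasting");
	}
	Ok(())
}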
@@@ -7842,7 -7665,6 +7842,7 @@@ fn test_bump_penalty_txn_on_revoked_htl
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
  
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
 -      expect_pending_htlcs_forwardable_ignore!(nodes[0]);
 +      let events = nodes[0].node.get_and_clear_pending_events();
 +      expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
 +      match events[1] {
 +              Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
 +              _ => panic!("Unexpected event"),
 +      }
        let first;
        let feerate_1;
        let penalty_txn;
@@@ -8115,7 -7932,6 +8115,7 @@@ fn test_counterparty_raa_skip_no_crash(
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
  }
  
  #[test]
@@@ -8148,7 -7964,6 +8148,7 @@@ fn test_bump_txn_sanitize_tracking_maps
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
@@@ -8632,7 -8447,6 +8632,7 @@@ fn test_pre_lockin_no_chan_closed_updat
        let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() });
  }
  
  #[test]
@@@ -8667,7 -8481,6 +8667,7 @@@ fn test_htlc_no_detection() 
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
  
        let htlc_timeout = {
@@@ -8727,7 -8540,6 +8727,7 @@@ fn do_test_onchain_htlc_settlement_afte
        nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
 +      check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
 +                      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                assert_eq!(bob_txn.len(), 1);
                check_spends!(bob_txn[0], chan_ab.3);
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
 +                      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@@ -9036,7 -8846,6 +9036,7 @@@ fn test_error_chans_closed() 
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
 +      check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
 +      check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@@ -9111,7 -8919,6 +9111,7 @@@ fn test_invalid_funding_tx() 
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
  
        confirm_transaction_at(&nodes[1], &tx, 1);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@@ -9155,7 -8962,6 +9155,7 @@@ fn do_test_tx_confirmed_skipping_blocks
  
        nodes[1].node.force_close_channel(&channel_id).unwrap();
        check_closed_broadcast!(nodes[1], true);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@@ -9263,116 -9069,3 +9263,3 @@@ fn test_keysend_payments_to_private_nod
        pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
        claim_payment(&nodes[0], &path, test_preimage);
  }
- fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, on_holder_tx: bool) {
-       // Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat` policy.
-       //
-       // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust inbound HTLC
-       // balance, the trimmed-to-dust outbound HTLC balance, and this new payment, as included on
-       // the next counterparty commitment, is above our `max_dust_htlc_exposure_msat`, we'll reject the update.
-       // At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust inbound HTLC
-       // balance, the trimmed-to-dust outbound HTLC balance, and this newly received HTLC, as
-       // included on the next counterparty commitment, is above our `max_dust_htlc_exposure_msat`, we'll fail the update.
-       // Note that we return a `temporary_channel_failure` (0x1000 | 7), as the channel may become
-       // available again for HTLC processing once the dust exposure has cleared up.
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let mut config = test_default_channel_config();
-       config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
-       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
-       let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       open_channel.max_htlc_value_in_flight_msat = 50_000_000;
-       open_channel.max_accepted_htlcs = 60;
-       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
-       let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-       if on_holder_tx {
-               accept_channel.dust_limit_satoshis = 660;
-       }
-       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
-       let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42);
-       if on_holder_tx {
-               if let Some(mut chan) = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
-                       chan.holder_dust_limit_satoshis = 660;
-               }
-       }
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
-       check_added_monitors!(nodes[1], 1);
-       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
-       check_added_monitors!(nodes[0], 1);
-       let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
-       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
-       update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
-       if on_holder_tx {
-               if dust_outbound_balance {
-                       for i in 0..2 {
-                               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 2_300_000);
-                               if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
-                       }
-               } else {
-                       for _ in 0..2 {
-                               route_payment(&nodes[0], &[&nodes[1]], 2_300_000);
-                       }
-               }
-       } else {
-               if dust_outbound_balance {
-                       for i in 0..25 {
-                               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 200_000); // + 177_000 msat of HTLC-success tx at 253 sats/kWU
-                               if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
-                       }
-               } else {
-                       for _ in 0..25 {
-                               route_payment(&nodes[0], &[&nodes[1]], 200_000); // + 167_000 msat of HTLC-timeout tx at 253 sats/kWU
-                       }
-               }
-       }
-       if at_forward {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { 2_300_000 } else { 200_000 });
-               let mut config = UserConfig::default();
-               if on_holder_tx {
-                       unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat)));
-               } else {
-                       unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat)));
-               }
-       } else {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { 2_300_000 } else { 200_000 });
-               nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
-               check_added_monitors!(nodes[0], 1);
-               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), 1);
-               let payment_event = SendEvent::from_event(events.remove(0));
-               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
-               if on_holder_tx {
-                       nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
-               } else {
-                       nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
-               }
-       }
-       let _ = nodes[1].node.get_and_clear_pending_msg_events();
-       let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-       added_monitors.clear();
- }
- #[test]
- fn test_max_dust_htlc_exposure() {
-       do_test_max_dust_htlc_exposure(true, true, true);
-       do_test_max_dust_htlc_exposure(false, true, true);
-       do_test_max_dust_htlc_exposure(false, false, true);
-       do_test_max_dust_htlc_exposure(false, false, false);
-       do_test_max_dust_htlc_exposure(true, true, false);
-       do_test_max_dust_htlc_exposure(true, false, false);
-       do_test_max_dust_htlc_exposure(true, false, true);
-       do_test_max_dust_htlc_exposure(false, true, false);
- }
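
The removed test above exercised the policy spelled out in its opening comment. A compact sketch of that accounting, with illustrative names (the real check lives inside the channel state machine and derives the trimmed sets from commitment-tx construction):

// Sum the msat value of all dust-trimmed HTLCs in both directions plus the
// candidate HTLC, then compare against the configured cap.
fn dust_exposure_after_new_htlc(trimmed_inbound_msat: &[u64],
		trimmed_outbound_msat: &[u64], new_htlc_msat: u64) -> u64 {
	trimmed_inbound_msat.iter().sum::<u64>()
		+ trimmed_outbound_msat.iter().sum::<u64>()
		+ new_htlc_msat
}

fn violates_dust_policy(exposure_msat: u64, max_dust_htlc_exposure_msat: u64) -> bool {
	// Over the cap: reject at send (`send_payment()`), or fail back with
	// temporary_channel_failure (0x1000 | 7) at receive (`update_add_htlc()`).
	exposure_msat > max_dust_htlc_exposure_msat
}

With the removed test's numbers: two pre-existing 2_300_000-msat dust HTLCs plus a third give 6_900_000 msat of exposure on the holder commitment, and twenty-five 200_000-msat HTLCs plus one more give 5_200_000 msat on the counterparty commitment, both over the 5_000_000-msat default cap.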