Merge pull request #1065 from TheBlueMatt/2021-08-bump-dust
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index e6529415f2a5ed2226afadf87caa6ca76543738c..3be75abbb0a1ff03be9de394b897cdb023a9fc9b 100644
@@ -19,7 +19,7 @@ use chain::transaction::OutPoint;
 use chain::keysinterface::BaseSign;
 use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MppId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
 use ln::channel::{Channel, ChannelError};
 use ln::{chan_utils, onion_utils};
 use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
@@ -30,7 +30,7 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
 use util::errors::APIError;
 use util::ser::{Writeable, ReadableArgs};
 use util::config::UserConfig;
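
The assertions added below lean on the ClosureReason type imported here. A minimal sketch of the variants this file exercises, reconstructed from the match arms in this diff (the authoritative definition in util::events may carry additional variants and fields):

    // Sketch only: the variants observed in this diff, not the full enum.
    pub enum ClosureReason {
        CooperativeClosure,              // both sides negotiated the close
        CommitmentTxConfirmed,           // a commitment transaction confirmed on chain
        HolderForceClosed,               // the local node force-closed the channel
        DisconnectedPeer,                // channel dropped in response to peer disconnection
        OutdatedChannelManager,          // a stale ChannelManager was deserialized
        ProcessingError { err: String }, // closed due to a protocol error, message in err
    }
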
@@ -638,6 +638,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
 }
 
 #[test]
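
check_closed_event! is used alongside the existing check_closed_broadcast! throughout this change: the latter checks the broadcast/error messages, the new macro checks the queued Event::ChannelClosed. A plausible expansion, inferred from the hand-rolled ChannelClosed matches later in this diff (the real macro lives in functional_test_utils.rs):

    // Hedged sketch: drain pending events and require exactly
    // $events_count ChannelClosed events carrying $reason.
    macro_rules! check_closed_event {
        ($node: expr, $events_count: expr, $reason: expr) => {{
            let events = $node.node.get_and_clear_pending_events();
            assert_eq!(events.len(), $events_count);
            for event in events {
                match event {
                    Event::ChannelClosed { ref reason, .. } => assert_eq!(*reason, $reason),
                    _ => panic!("Unexpected event"),
                }
            }
        }}
    }
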
@@ -738,6 +739,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -850,6 +853,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -977,10 +982,20 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
        close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -1176,6 +1191,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        // Check we only broadcast 1 timeout tx
@@ -1457,6 +1473,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
 }
 
 #[test]
@@ -1583,6 +1600,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
 }
 
 #[test]
@@ -2039,6 +2057,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
 
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
@@ -2059,6 +2079,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr) => {
@@ -2101,6 +2123,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
@@ -2172,6 +2196,8 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[4].node.list_channels().len(), 0);
 
        nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 #[test]
@@ -2221,6 +2247,7 @@ fn test_justice_tx() {
                        node_txn.truncate(1);
                }
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -2228,6 +2255,7 @@ fn test_justice_tx() {
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2269,9 +2297,11 @@ fn test_justice_tx() {
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
 
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2299,6 +2329,7 @@ fn revoked_output_claim() {
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
 
@@ -2308,7 +2339,8 @@ fn revoked_output_claim() {
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
-       check_added_monitors!(nodes[0], 1)
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 #[test]
@@ -2345,8 +2377,10 @@ fn claim_htlc_outputs_shared_tx() {
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
 
@@ -2403,7 +2437,13 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
-               expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               let mut events = nodes[0].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+               match events[1] {
+                       Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+                       _ => panic!("Unexpected event"),
+               }
 
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[1], payment_hash_2, true);
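
Where a ChannelClosed event now lands in the same queue as PendingHTLCsForwardable, the test drains the queue once and checks slices of it via the new expect_pending_htlcs_forwardable_from_events! helper. A hedged sketch of that macro, assuming the boolean argument controls whether process_pending_htlc_forwards runs after the check:

    macro_rules! expect_pending_htlcs_forwardable_from_events {
        ($node: expr, $events: expr, $process: expr) => {{
            assert_eq!($events.len(), 1);
            match $events[0] {
                Event::PendingHTLCsForwardable { .. } => { },
                _ => panic!("Unexpected event"),
            };
            if $process {
                $node.node.process_pending_htlc_forwards();
            }
        }}
    }
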
@@ -2501,6 +2541,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 5);
        assert_eq!(node_txn[0], node_txn[3]);
@@ -2526,11 +2567,15 @@ fn test_htlc_on_chain_success() {
                added_monitors.clear();
        }
        let forwarded_events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(forwarded_events.len(), 2);
-       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
-               } else { panic!(); }
+       assert_eq!(forwarded_events.len(), 3);
+       match forwarded_events[0] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
                } else { panic!(); }
+       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] {
+               } else { panic!(); }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -2597,6 +2642,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
        let commitment_spend =
@@ -2632,7 +2678,7 @@ fn test_htlc_on_chain_success() {
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 2);
+       assert_eq!(events.len(), 3);
        let mut first_claimed = false;
        for event in events {
                match event {
@@ -2644,6 +2690,7 @@ fn test_htlc_on_chain_success() {
                                        assert_eq!(payment_preimage, our_payment_preimage_2);
                                }
                        },
+                       Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
                        _ => panic!("Unexpected event"),
                }
        }
@@ -2700,6 +2747,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan_2.3);
@@ -2709,6 +2757,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fails it backward accordingly
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let timeout_tx;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -2776,6 +2825,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], chan_1.3);
@@ -2814,6 +2864,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
@@ -2963,15 +3014,19 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
        match events[0] {
-               Event::PaymentFailed { ref payment_hash, .. } => {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
+               _ => panic!("Unexepected event"),
+       }
+       match events[1] {
+               Event::PaymentPathFailed { ref payment_hash, .. } => {
                        assert_eq!(*payment_hash, fourth_payment_hash);
                },
                _ => panic!("Unexpected event"),
        }
        if !deliver_bs_raa {
-               match events[1] {
+               match events[2] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
@@ -3021,7 +3076,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 3);
                        match events[0] {
-                               Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
+                               Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        // If we delivered B's RAA we got an unknown preimage error, not something
                                        // that we should update our routing table for.
@@ -3032,14 +3087,14 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
-                               Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
+                               Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[2] {
-                               Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
+                               Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => {
                                        assert!(failed_htlcs.insert(payment_hash.0));
                                        assert!(network_update.is_some());
                                },
@@ -3131,9 +3186,21 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
-
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
        // Check that Alice fails backward the pending HTLC from the second payment.
-       expect_payment_failed!(nodes[0], failed_payment_hash, true);
+       match events[0] {
+               Event::PaymentPathFailed { payment_hash, .. } => {
+                       assert_eq!(payment_hash, failed_payment_hash);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
+                       assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
+               },
+               _ => panic!("Unexpected event {:?}", events[1]),
+       }
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 }
@@ -3153,6 +3220,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3);
@@ -3162,6 +3230,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@ -3216,6 +3285,7 @@ fn test_force_close_fail_back() {
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -3230,6 +3300,7 @@ fn test_force_close_fail_back() {
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@ -3308,7 +3379,7 @@ fn test_simple_peer_disconnect() {
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
-       fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
+       fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
 
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
        {
@@ -3321,7 +3392,7 @@ fn test_simple_peer_disconnect() {
                        _ => panic!("Unexpected event"),
                }
                match events[1] {
-                       Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+                       Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                                assert!(rejected_by_dest);
                        },
@@ -3886,7 +3957,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                // Use the utility function send_payment_along_path to send the payment with MPP data which
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
-               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, &None).unwrap();
+               let mpp_id = MppId([42; 32]);
+               nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, mpp_id, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
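
The extra mpp_id argument threaded into send_payment_along_path above lets the ChannelManager tie each HTLC path back to the payment it belongs to. From the MppId([42; 32]) constructor it is evidently a newtype over a 32-byte array; a sketch (the real type in ln::channelmanager presumably derives whatever trait set the pending-payment tracking needs):

    // Sketch of the payment identifier implied by `MppId([42; 32])`.
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    pub struct MppId(pub [u8; 32]);
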
@@ -4083,6 +4155,34 @@ fn test_no_txn_manager_serialize_deserialize() {
        send_payment(&nodes[0], &[&nodes[1]], 1000000);
 }
 
+#[test]
+fn mpp_failure() {
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let logger = test_utils::TestLogger::new();
+
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+       let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+       let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+       let path = route.paths[0].clone();
+       route.paths.push(path);
+       route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+       route.paths[0][0].short_channel_id = chan_1_id;
+       route.paths[0][1].short_channel_id = chan_3_id;
+       route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+       route.paths[1][0].short_channel_id = chan_2_id;
+       route.paths[1][1].short_channel_id = chan_4_id;
+       send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
+       fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
+}
+
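
The new mpp_failure test works because a route is a set of paths: cloning route.paths[0], pushing the clone, and rewriting each shard's first-hop pubkey and short_channel_ids yields a two-shard MPP payment over the diamond 0-1-3 / 0-2-3 topology. The shape assumed by that indexing (route.paths[i][j].pubkey / .short_channel_id):

    // Assumed shape, consistent with the indexing above; the real structs in
    // routing::router carry additional fee/CLTV/feature fields.
    pub struct Route { pub paths: Vec<Vec<RouteHop>> }
    pub struct RouteHop {
        pub pubkey: PublicKey,
        pub short_channel_id: u64,
        // fee_msat, cltv_expiry_delta, features, ...
    }

Note also that fail_payment_along_route now takes a slice of paths (&[&[&Node]]) rather than a single path, as in the test_simple_peer_disconnect hunk above, so both shards here can be failed backward with one call.
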
 #[test]
 fn test_dup_htlc_onchain_fails_on_reload() {
        // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
@@ -4092,7 +4192,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        //
        // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
        // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
-       // PaymentFailed event appearing). However, because we may not serialize the relevant
+       // PaymentPathFailed event appearing). However, because we may not serialize the relevant
        // ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
        // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
        // and de-duplicates ChannelMonitor events.
@@ -4114,6 +4214,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -4131,6 +4232,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 
        header.prev_blockhash = nodes[0].best_block_hash();
@@ -4475,6 +4577,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
+       check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
 
        // nodes[1] and nodes[2] have no lost state with nodes[0]...
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -4538,6 +4641,7 @@ fn test_claim_sizeable_push_msat() {
        nodes[1].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4566,6 +4670,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        nodes[0].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
@@ -4575,6 +4680,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4602,6 +4708,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        mine_transaction(&nodes[1], &node_txn[0]);
@@ -4654,6 +4761,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        check_spends!(node_txn[2], node_txn[1]);
 
        mine_transaction(&nodes[1], &node_txn[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4698,6 +4806,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[1]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[1], our_payment_hash, true);
 
@@ -4728,6 +4837,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@ -4764,6 +4874,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -4779,6 +4890,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
@@ -4835,6 +4947,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 
        assert_eq!(revoked_htlc_txn.len(), 2);
@@ -4851,6 +4964,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
@@ -4931,6 +5045,7 @@ fn test_onchain_to_onchain_claim() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 3);
@@ -4947,7 +5062,19 @@ fn test_onchain_to_onchain_claim() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
        check_added_monitors!(nodes[1], 1);
-       expect_payment_forwarded!(nodes[1], Some(1000), true);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+                       assert_eq!(fee_earned_msat, Some(1000));
+                       assert_eq!(claim_from_onchain_tx, true);
+               },
+               _ => panic!("Unexpected event"),
+       }
        {
                let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // ChannelMonitor: claim tx
@@ -4955,9 +5082,9 @@ fn test_onchain_to_onchain_claim() {
                check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
                b_txn.clear();
        }
+       check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
-       check_added_monitors!(nodes[1], 1);
        match msg_events[0] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
@@ -4979,6 +5106,7 @@ fn test_onchain_to_onchain_claim() {
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
        assert_eq!(b_txn.len(), 3);
@@ -5036,6 +5164,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout_tx;
@@ -5062,6 +5191,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        nodes[2].node.claim_funds(our_payment_preimage);
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5149,6 +5279,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        check_added_monitors!(nodes[1], 1);
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5319,9 +5450,26 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
        }
+       let events = nodes[2].node.get_and_clear_pending_events();
+       let close_event = if deliver_last_raa {
+               assert_eq!(events.len(), 2);
+               events[1].clone()
+       } else {
+               assert_eq!(events.len(), 1);
+               events[0].clone()
+       };
+       match close_event {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
+
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       if deliver_last_raa {
+               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+       } else {
+               expect_pending_htlcs_forwardable!(nodes[2]);
+       }
        check_added_monitors!(nodes[2], 3);
 
        let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
@@ -5370,7 +5518,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        let mut as_failds = HashSet::new();
        let mut as_updates = 0;
        for event in as_events.iter() {
-               if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
+               if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(as_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_2 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
@@ -5395,7 +5543,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        let mut bs_failds = HashSet::new();
        let mut bs_updates = 0;
        for event in bs_events.iter() {
-               if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
+               if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event {
                        assert!(bs_failds.insert(*payment_hash));
                        if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
                                assert_eq!(*rejected_by_dest, deliver_last_raa);
@@ -5458,6 +5606,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout = {
@@ -5541,6 +5690,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -5581,6 +5731,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5588,6 +5739,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -5638,6 +5790,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5670,6 +5823,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5718,6 +5872,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -5913,9 +6068,10 @@ fn test_fail_holding_cell_htlc_upon_free() {
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
-               &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => {
+               &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(our_payment_hash.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
+                       assert_eq!(*all_paths_failed, true);
                        assert_eq!(*network_update, None);
                        assert_eq!(*error_code, None);
                        assert_eq!(*error_data, None);
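
The PaymentFailed -> PaymentPathFailed rename throughout this file comes with two fields visible in the destructure above: all_paths_failed, telling the payer whether every path of the payment has now failed (always true for the single-path sends in these tests), and path, the hops of the failed route. The variant as exercised here, reconstructed from the match arms (field types are educated guesses, not the authoritative definition):

    PaymentPathFailed {
        payment_hash: PaymentHash,
        rejected_by_dest: bool,                 // the final node rejected the HTLC
        network_update: Option<NetworkUpdate>,  // routing-graph hint, if any
        all_paths_failed: bool,                 // no other part still in flight
        path: Vec<RouteHop>,                    // hops of the failed path
        error_code: Option<u16>,
        error_data: Option<Vec<u8>>,
    },
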
@@ -5999,9 +6155,10 @@ fn test_free_and_fail_holding_cell_htlcs() {
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match &events[0] {
-               &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => {
+               &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => {
                        assert_eq!(payment_hash_2.clone(), *payment_hash);
                        assert_eq!(*rejected_by_dest, false);
+                       assert_eq!(*all_paths_failed, true);
                        assert_eq!(*network_update, None);
                        assert_eq!(*error_code, None);
                        assert_eq!(*error_data, None);
@@ -6255,6 +6412,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
 }
 
 #[test]
@@ -6382,6 +6540,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6418,6 +6577,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6462,6 +6622,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6487,6 +6648,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6512,6 +6674,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6561,6 +6724,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6594,6 +6758,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6627,6 +6792,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6660,6 +6826,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6701,6 +6868,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6742,6 +6910,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6790,6 +6959,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
 }
 
 #[test]
@@ -6932,16 +7102,17 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        let events = nodes[0].node.get_and_clear_pending_events();
-       // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx
+       // Only 2 PaymentPathFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
        assert_eq!(events.len(), 2);
        let mut first_failed = false;
        for event in events {
                match event {
-                       Event::PaymentFailed { payment_hash, .. } => {
+                       Event::PaymentPathFailed { payment_hash, .. } => {
                                if payment_hash == payment_hash_1 {
                                        assert!(!first_failed);
                                        first_failed = true;
@@ -6992,6 +7163,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[0], dust_hash, true);
 
@@ -7011,6 +7183,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
@@ -7029,14 +7202,14 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                        assert_eq!(events.len(), 2);
                        let first;
                        match events[0] {
-                               Event::PaymentFailed { payment_hash, .. } => {
+                               Event::PaymentPathFailed { payment_hash, .. } => {
                                        if payment_hash == dust_hash { first = true; }
                                        else { first = false; }
                                },
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
-                               Event::PaymentFailed { payment_hash, .. } => {
+                               Event::PaymentPathFailed { payment_hash, .. } => {
                                        if first { assert_eq!(payment_hash, non_dust_hash); }
                                        else { assert_eq!(payment_hash, dust_hash); }
                                },
@@ -7092,14 +7265,17 @@ fn test_user_configurable_csv_delay() {
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        accept_channel.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+       let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
                match action {
                        &ErrorAction::SendErrorMessage { ref msg } => {
                                assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
+                               reason_msg = msg.data.clone();
                        },
-                       _ => { assert!(false); }
+                       _ => { panic!(); }
                }
-       } else { assert!(false); }
+       } else { panic!(); }
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
 
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
@@ -7212,10 +7388,10 @@ fn test_data_loss_protect() {
 
        // Check we close channel detecting A is fallen-behind
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
        check_added_monitors!(nodes[1], 1);
 
-
        // Check A is able to claim to_remote output
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
@@ -7223,6 +7399,7 @@ fn test_data_loss_protect() {
        assert_eq!(node_txn[0].output.len(), 2);
        mine_transaction(&nodes[0], &node_txn[0]);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
@@ -7665,6 +7842,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7686,7 +7864,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+       match events[1] {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        let first;
        let feerate_1;
        let penalty_txn;
@@ -7932,6 +8115,7 @@ fn test_counterparty_raa_skip_no_crash() {
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
 }
 
 #[test]
@@ -7964,6 +8148,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
@@ -8447,6 +8632,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() });
 }
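
ClosureReason::CounterpartyForceClosed carries the counterparty's raw error body, so the test can assert that the exact peer-supplied string ("Hi") is surfaced to the user. Under the same assumption about check_closed_event!'s shape as in the sketch above, the manual equivalent would be roughly:

        // Sketch: the peer's error body should appear verbatim in the
        // closure reason's peer_msg field.
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
                Event::ChannelClosed { reason: ClosureReason::CounterpartyForceClosed { ref peer_msg }, .. } => {
                        assert_eq!(peer_msg.as_str(), "Hi");
                },
                _ => panic!("Unexpected event"),
        }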
 
 #[test]
@@ -8481,6 +8667,7 @@ fn test_htlc_no_detection() {
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
 
        let htlc_timeout = {
@@ -8540,6 +8727,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
+       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
@@ -8551,6 +8739,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                assert_eq!(bob_txn.len(), 1);
                check_spends!(bob_txn[0], chan_ab.3);
@@ -8631,6 +8820,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@ -8846,6 +9036,7 @@ fn test_error_chans_closed() {
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -8855,6 +9046,7 @@ fn test_error_chans_closed() {
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
+       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@ -8919,6 +9111,7 @@ fn test_invalid_funding_tx() {
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
        confirm_transaction_at(&nodes[1], &tx, 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -8962,6 +9155,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        nodes[1].node.force_close_channel(&channel_id).unwrap();
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);