Add ChannelClosed generation at cooperative/force-close/error processing
Author:     Antoine Riard <dev@ariard.me>
AuthorDate: Tue, 13 Jul 2021 16:56:40 +0000 (12:56 -0400)
Commit:     Antoine Riard <dev@ariard.me>
CommitDate: Fri, 17 Sep 2021 22:41:16 +0000 (18:41 -0400)
When we detect that a channel is_shutdown() or we call
force_shutdown() on it, we notify the user with an
Event::ChannelClosed reporting the channel id and the closure
reason on a best-effort basis.
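
For reference, a minimal sketch of the new event and closure descriptor
implied by the call sites in this patch. The actual definitions land in
lightning/src/util/events.rs (listed below; its hunk is not shown here),
so the field and variant shapes below are inferred, not authoritative:

    // Hypothetical sketch; shapes inferred from the usages in this diff.
    /// Why a channel was closed, carried in Event::ChannelClosed.
    pub enum ClosureDescriptor {
        /// We force-closed the channel via force_shutdown().
        ForceClosed,
        /// The channel was cooperatively closed (closing_signed completed).
        CooperativeClosure,
        /// A commitment transaction we did not expect confirmed on-chain.
        UnknownOnchainCommitment,
        /// The peer disconnected before the channel was usable.
        DisconnectedPeer,
        /// An internal processing error forced the close.
        ProcessingError,
    }

    pub enum Event {
        // ... existing variants elided ...
        /// Best-effort notification that a channel has been closed.
        ChannelClosed { channel_id: [u8; 32], err: ClosureDescriptor },
    }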

lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/util/events.rs

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 9a5d2eaa90525e1ee205e83c5f736db35c699ece..946da77aa52ec1147aa2249186616b9549f2b12f 100644
@@ -81,6 +81,7 @@ fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
        // PaymentFailed event
 
        assert_eq!(nodes[0].node.list_channels().len(), 0);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -216,7 +217,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let events_3 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_3.len(), 1);
@@ -269,6 +271,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
        // PaymentFailed event
 
        assert_eq!(nodes[0].node.list_channels().len(), 0);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -590,7 +593,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
@@ -710,7 +714,8 @@ fn test_monitor_update_fail_cs() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
        check_added_monitors!(nodes[1], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
@@ -761,7 +766,6 @@ fn test_monitor_update_fail_no_rebroadcast() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
@@ -769,7 +773,8 @@ fn test_monitor_update_fail_no_rebroadcast() {
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 0);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
@@ -850,12 +855,14 @@ fn test_monitor_update_raa_while_paused() {
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
        check_added_monitors!(nodes[0], 1);
-       expect_pending_htlcs_forwardable!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[0], events);
        expect_payment_received!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
        check_added_monitors!(nodes[1], 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -880,7 +887,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
        assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
        check_added_monitors!(nodes[2], 1);
 
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -908,7 +916,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -917,7 +926,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
-       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
@@ -939,7 +947,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
        // and not forwarded.
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
@@ -967,7 +976,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
 
        let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
@@ -1007,7 +1017,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
        commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
-       expect_payment_failed!(nodes[0], payment_hash_1, true);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, payment_hash_1, true);
 
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
        let as_cs;
@@ -1091,7 +1102,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        check_added_monitors!(nodes[2], 1);
        assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
 
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
 
        let events_6 = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events_6.len(), 2);
@@ -1105,7 +1117,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        };
 
        if test_ignore_second_cs {
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                check_added_monitors!(nodes[1], 1);
 
                send_event = SendEvent::from_node(&nodes[1]);
@@ -1114,7 +1127,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
 
-               expect_pending_htlcs_forwardable!(nodes[0]);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[0], events);
 
                let events_9 = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events_9.len(), 1);
@@ -1294,7 +1308,8 @@ fn raa_no_response_awaiting_raa_state() {
        // nodes[1] should be AwaitingRAA here!
        check_added_monitors!(nodes[1], 0);
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
 
        // We send a third payment here, which is somewhat of a redundant test, but the
@@ -1325,7 +1340,8 @@ fn raa_no_response_awaiting_raa_state() {
        // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
        let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
@@ -1338,7 +1354,8 @@ fn raa_no_response_awaiting_raa_state() {
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -1454,7 +1471,8 @@ fn claim_while_disconnected_monitor_update_fail() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let mut events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
@@ -1536,7 +1554,8 @@ fn monitor_failed_no_reestablish_response() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let mut events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -1624,7 +1643,8 @@ fn first_message_on_recv_ordering() {
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
 
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1637,7 +1657,8 @@ fn first_message_on_recv_ordering() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -1715,16 +1736,19 @@ fn test_monitor_update_fail_claim() {
        let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
-       expect_payment_sent!(nodes[0], payment_preimage_1);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_sent!(nodes[0], payment_preimage_1, events);
 
        // Get the payment forwards, note that they were batched into one commitment update.
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
        let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
        commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
-       expect_pending_htlcs_forwardable!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[0], events);
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
@@ -1777,7 +1801,8 @@ fn test_monitor_update_on_pending_forwards() {
 
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
        assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
        check_added_monitors!(nodes[2], 1);
 
        let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -1800,7 +1825,8 @@ fn test_monitor_update_on_pending_forwards() {
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -1877,7 +1903,8 @@ fn monitor_update_claim_fail_no_response() {
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1926,7 +1953,6 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
-       assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
        let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
@@ -1985,6 +2011,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
        close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+       check_closed_event!(nodes[0], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -2140,7 +2168,8 @@ fn test_pending_update_fee_ack_on_reconnect() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
        check_added_monitors!(nodes[1], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[0], events);
        expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000);
 
        claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
@@ -2410,7 +2439,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
                        assert!(updates.update_fee.is_none());
                        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
                        nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-                       expect_payment_sent!(nodes[1], payment_preimage_0);
+                       let events = nodes[1].node.get_and_clear_pending_events();
+                       expect_payment_sent!(nodes[1], payment_preimage_0, events);
                        assert_eq!(updates.update_add_htlcs.len(), 1);
                        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
                        updates.commitment_signed
@@ -2423,13 +2453,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 
        let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000);
        check_added_monitors!(nodes[1], 1);
 
        commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -2496,7 +2528,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
        };
        if second_fails {
                assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
-               expect_pending_htlcs_forwardable!(nodes[2]);
+               let events = nodes[2].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[2], events);
                check_added_monitors!(nodes[2], 1);
                get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        } else {
@@ -2516,7 +2549,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
                bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
                assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
-               expect_payment_sent!(nodes[0], payment_preimage);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_sent!(nodes[0], payment_preimage, events);
                if htlc_status == HTLCStatusAtDupClaim::Cleared {
                        commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
                }
@@ -2529,7 +2563,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 
        if second_fails {
                reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
        } else {
                reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
@@ -2537,12 +2572,14 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
        if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
                nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
                check_added_monitors!(nodes[1], 1);
-               expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable_ignore!(nodes[1], events); // We finally receive the second payment, but don't claim it
 
                bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
                assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
-               expect_payment_sent!(nodes[0], payment_preimage);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_sent!(nodes[0], payment_preimage, events);
        }
        if htlc_status != HTLCStatusAtDupClaim::Cleared {
                commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
@@ -2705,7 +2742,8 @@ fn double_temp_error() {
        assert_eq!(node_id, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
        check_added_monitors!(nodes[0], 0);
-       expect_payment_sent!(nodes[0], payment_preimage_1);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_sent!(nodes[0], payment_preimage_1, events);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
        check_added_monitors!(nodes[0], 1);
        nodes[0].node.process_pending_htlc_forwards();
@@ -2748,5 +2786,6 @@ fn double_temp_error() {
        check_added_monitors!(nodes[0], 0);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
-       expect_payment_sent!(nodes[0], payment_preimage_2);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_sent!(nodes[0], payment_preimage_2, events);
 }
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index dccca13ff978bb59f17d45f04c0a3dd5eece23ad..dcc80382c33a9ea9728ba4eb4a3a3c96d95472d6 100644
@@ -52,7 +52,7 @@ use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
 use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureDescriptor};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -835,6 +835,9 @@ macro_rules! handle_error {
                                                        msg: update
                                                });
                                        }
+                                       if let Some(channel_id) = chan_id {
+                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id,  err: ClosureDescriptor::ProcessingError });
+                                       }
                                }
 
                                log_error!($self.logger, "{}", err.err);
@@ -1469,6 +1472,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                msg: update
                        });
                }
+               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: *channel_id,  err: ClosureDescriptor::ForceClosed });
 
                Ok(chan.get_counterparty_node_id())
        }
@@ -2421,6 +2425,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
+                                                                                       // ChannelClosed event is generated by handle_error! for us.
                                                                                        Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
@@ -3550,6 +3555,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        msg: update
                                });
                        }
+                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id,  err: ClosureDescriptor::CooperativeClosure });
                }
                Ok(())
        }
@@ -3961,6 +3967,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  err: ClosureDescriptor::UnknownOnchainCommitment });
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: chan.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage {
@@ -4022,6 +4029,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        Err(e) => {
                                                let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                               // ChannelClosed event is generated by handle_error! for us.
                                                !close_channel
                                        }
                                }
@@ -4495,6 +4503,7 @@ where
                                                        msg: update
                                                });
                                        }
+                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(),  err: ClosureDescriptor::UnknownOnchainCommitment });
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
@@ -4685,6 +4694,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                                msg: update
                                                        });
                                                }
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  err: ClosureDescriptor::DisconnectedPeer });
                                                false
                                        } else {
                                                true
@@ -4699,6 +4709,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
                                                        }
+                                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  err: ClosureDescriptor::ProcessingError });
                                                        return false;
                                                } else {
                                                        no_channels_remain = false;
@@ -5657,8 +5668,10 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-               expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -5668,7 +5681,8 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
-               expect_payment_failed!(nodes[0], our_payment_hash, true);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[0], events, our_payment_hash, true);
 
                // Send the second half of the original MPP payment.
                nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
@@ -5748,8 +5762,10 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-               expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -5759,7 +5775,8 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
-               expect_payment_failed!(nodes[0], payment_hash, true);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[0], events, payment_hash, true);
 
                // Finally, claim the original payment.
                claim_payment(&nodes[0], &expected_route, payment_preimage);
@@ -5786,8 +5803,10 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-               expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -5797,7 +5816,8 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
-               expect_payment_failed!(nodes[0], payment_hash, true);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[0], events, payment_hash, true);
 
                // Finally, succeed the keysend payment.
                claim_payment(&nodes[0], &expected_route, payment_preimage);
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index dd1fedd9733c30b99002771ba7dffd9ccb426cbf..ac9f4b396cff3a0da2b9fd6fcba40fad44b80963 100644
@@ -743,16 +743,16 @@ macro_rules! get_closing_signed_broadcast {
 #[macro_export]
 macro_rules! check_closed_broadcast {
        ($node: expr, $with_error_msg: expr) => {{
-               let events = $node.node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 });
-               match events[0] {
+               let msg_events = $node.node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), if $with_error_msg { 2 } else { 1 });
+               match msg_events[0] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                assert_eq!(msg.contents.flags & 2, 2);
                        },
                        _ => panic!("Unexpected event"),
                }
                if $with_error_msg {
-                       match events[1] {
+                       match msg_events[1] {
                                MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
                                        // TODO: Check node_id
                                        Some(msg.clone())
@@ -763,6 +763,19 @@ macro_rules! check_closed_broadcast {
        }}
 }
 
+/// Check that a channel's ChannelClosed event has been issued
+#[macro_export]
+macro_rules! check_closed_event {
+       ($node: expr, $events: expr) => {{
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), $events);
+               match events[0] {
+                       Event::ChannelClosed { .. } => {}
+                       _ => panic!("Unexpected event"),
+               }
+       }}
+}
+
 pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
        let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
        let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) };
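
A typical call site for the new macro, mirroring its uses elsewhere in
this patch. Note that the second argument is the expected number of
pending events (the macro parameter is named $events but is compared
against events.len()):

    // After a cooperative close, both peers should have queued ChannelClosed.
    close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
    check_closed_event!(nodes[0], 1);
    check_closed_event!(nodes[1], 1);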
@@ -927,7 +940,8 @@ macro_rules! commitment_signed_dance {
                {
                        commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
                        if $fail_backwards {
-                               expect_pending_htlcs_forwardable!($node_a);
+                               let events = $node_a.node.get_and_clear_pending_events();
+                               expect_pending_htlcs_forwardable!($node_a, events);
                                check_added_monitors!($node_a, 1);
 
                                let channel_state = $node_a.node.channel_state.lock().unwrap();
@@ -969,10 +983,9 @@ macro_rules! get_route_and_payment_hash {
 }
 
 macro_rules! expect_pending_htlcs_forwardable_ignore {
-       ($node: expr) => {{
-               let events = $node.node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 1);
-               match events[0] {
+       ($node: expr, $events: expr) => {{
+               assert_eq!($events.len(), 1);
+               match $events[0] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
@@ -980,8 +993,8 @@ macro_rules! expect_pending_htlcs_forwardable_ignore {
 }
 
 macro_rules! expect_pending_htlcs_forwardable {
-       ($node: expr) => {{
-               expect_pending_htlcs_forwardable_ignore!($node);
+       ($node: expr, $events: expr) => {{
+               expect_pending_htlcs_forwardable_ignore!($node, $events);
                $node.node.process_pending_htlc_forwards();
        }}
 }
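
The refactor above changes these macros from draining the event queue
themselves to taking a caller-supplied vector, presumably because
get_and_clear_pending_events() can now also surface ChannelClosed
events and call sites need a chance to inspect or filter the vector
before the single-event assertion runs. Every caller in this patch is
updated to the same two-line pattern:

    // Caller drains the queue itself, then hands the events to the macro.
    let events = node.node.get_and_clear_pending_events();
    expect_pending_htlcs_forwardable!(node, events);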
@@ -1009,10 +1022,9 @@ macro_rules! expect_payment_received {
 }
 
 macro_rules! expect_payment_sent {
-       ($node: expr, $expected_payment_preimage: expr) => {
-               let events = $node.node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 1);
-               match events[0] {
+       ($node: expr, $expected_payment_preimage: expr, $events: expr) => {
+               assert_eq!($events.len(), 1);
+               match $events[0] {
                        Event::PaymentSent { ref payment_preimage } => {
                                assert_eq!($expected_payment_preimage, *payment_preimage);
                        },
@@ -1066,8 +1078,8 @@ macro_rules! expect_payment_failed_with_update {
 
 #[cfg(test)]
 macro_rules! expect_payment_failed {
-       ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => {
-               let events = $node.node.get_and_clear_pending_events();
+       ($node: expr, $events: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => {
+               let events: Vec<Event> = $events;
                assert_eq!(events.len(), 1);
                match events[0] {
                        Event::PaymentFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => {
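
The explicit ascription `let events: Vec<Event> = $events;` in the
macro above presumably pins the argument to an owned vector, so passing
a reference by mistake fails to compile. The updated calling convention,
as used throughout this patch:

    // Drain the queue, then assert on the single expected PaymentFailed.
    let events = nodes[0].node.get_and_clear_pending_events();
    expect_payment_failed!(nodes[0], events, payment_hash, true);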
@@ -1102,7 +1114,8 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path
                check_added_monitors!(node, 0);
                commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
 
-               expect_pending_htlcs_forwardable!(node);
+               let events = node.node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(node, events);
 
                if idx == expected_path.len() - 1 {
                        let events_2 = node.node.get_and_clear_pending_events();
@@ -1245,7 +1258,8 @@ pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, exp
                }
        }
        if !skip_last {
-               expect_payment_sent!(origin_node, our_payment_preimage);
+               let events = origin_node.node.get_and_clear_pending_events();
+               expect_payment_sent!(origin_node, our_payment_preimage, events);
        }
 }
 
@@ -1295,7 +1309,8 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        assert!(expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
-       expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
+       let events = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap(), events);
        check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());
 
        let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
@@ -1330,7 +1345,8 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
                                node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
                                commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node);
                                if !update_next_node {
-                                       expect_pending_htlcs_forwardable!(node);
+                                       let events = node.node.get_and_clear_pending_events();
+                                       expect_pending_htlcs_forwardable!(node, events);
                                }
                        }
                        let events = node.node.get_and_clear_pending_msg_events();
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 85eef456c21103354b9dd90a1dc16947555bc9c6..b7b1e94bc3cb5e29415b55a7da594c286341cec8 100644
@@ -638,6 +638,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -724,7 +725,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-       expect_pending_htlcs_forwardable!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[0], events);
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
@@ -738,6 +740,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -850,6 +854,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+       check_closed_event!(nodes[0], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -977,10 +983,20 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+       check_closed_event!(nodes[0], 1);
+       check_closed_event!(nodes[1], 1);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+       check_closed_event!(nodes[1], 1);
+       check_closed_event!(nodes[2], 1);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+       check_closed_event!(nodes[2], 1);
+       check_closed_event!(nodes[3], 1);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+       check_closed_event!(nodes[1], 1);
+       check_closed_event!(nodes[3], 1);
        close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+       check_closed_event!(nodes[1], 1);
+       check_closed_event!(nodes[3], 1);
 }
 
 #[test]
@@ -1042,8 +1058,10 @@ fn holding_cell_htlc_counting() {
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
        // We have to forward pending HTLCs twice - once tries to forward the payment forward (and
        // fails), the second will process the resulting failure and fail the HTLC backward.
-       expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
 
        let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1084,7 +1102,8 @@ fn holding_cell_htlc_counting() {
        nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
        check_added_monitors!(nodes[2], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
 
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), payments.len());
@@ -1176,6 +1195,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        // Check we only broadcast 1 timeout tx
@@ -1457,6 +1477,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -1583,6 +1604,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -1771,13 +1793,15 @@ fn test_channel_reserve_holding_cell_htlcs() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let ref payment_event_11 = expect_forward!(nodes[1]);
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
        commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
 
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
        expect_payment_received!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
 
        // flush the htlcs in the holding cell
@@ -1785,7 +1809,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
        commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let ref payment_event_3 = expect_forward!(nodes[1]);
        assert_eq!(payment_event_3.msgs.len(), 2);
@@ -1793,7 +1818,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
 
        commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
 
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
@@ -1908,7 +1934,8 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent!(nodes[0], payment_preimage_1);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_sent!(nodes[0], payment_preimage_1, events);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
@@ -1937,13 +1964,15 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent!(nodes[0], payment_preimage_2);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_sent!(nodes[0], payment_preimage_2, events);
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 100000);
 
        // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
@@ -1990,7 +2019,8 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        check_added_monitors!(nodes[0], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[0], events);
        expect_payment_received!(nodes[0], payment_hash_4, payment_secret_4, 10000);
 
        claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
@@ -2039,6 +2069,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
+       check_closed_event!(nodes[0], 1);
+       check_closed_event!(nodes[1], 1);
 
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
@@ -2059,6 +2091,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
+       check_closed_event!(nodes[1], 1);
+       check_closed_event!(nodes[2], 1);
 
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr) => {
@@ -2101,6 +2135,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
+       check_closed_event!(nodes[2], 1);
+       check_closed_event!(nodes[3], 1);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
@@ -2172,6 +2208,8 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[4].node.list_channels().len(), 0);
 
        nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
+       check_closed_event!(nodes[3], 1);
+       check_closed_event!(nodes[4], 1);
 }
 
 #[test]
@@ -2221,6 +2259,7 @@ fn test_justice_tx() {
                        node_txn.truncate(1);
                }
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -2228,6 +2267,7 @@ fn test_justice_tx() {
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2269,9 +2309,11 @@ fn test_justice_tx() {
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
 
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
+               check_closed_event!(nodes[1], 1);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
+               check_closed_event!(nodes[0], 1);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2299,6 +2341,7 @@ fn revoked_output_claim() {
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
 
@@ -2308,7 +2351,8 @@ fn revoked_output_claim() {
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
-       check_added_monitors!(nodes[0], 1)
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -2345,10 +2389,13 @@ fn claim_htlc_outputs_shared_tx() {
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[1], payment_hash_2, true);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[1], events, payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment
@@ -2403,10 +2450,17 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
-               expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+               check_closed_event!(nodes[1], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable_ignore!(nodes[0], events[0..1]);
+               match events[1] {
+                       Event::ChannelClosed { .. } => {}
+                       _ => panic!("Unexpected event"),
+               }
 
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[1], payment_hash_2, true);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[1], events, payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 9);
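
Here the queue holds both a PendingHTLCsForwardable and a ChannelClosed event, so the test drains once, hands a one-element slice to the _ignore variant, and matches the closure by hand. For reference, the _ignore form only asserts that the forwardable event is present, while the plain form additionally kicks HTLC processing; roughly, as a sketch of the helpers in functional_test_utils.rs:

	macro_rules! expect_pending_htlcs_forwardable_ignore {
		($node: expr, $events: expr) => {
			assert_eq!($events.len(), 1);
			match $events[0] {
				Event::PendingHTLCsForwardable { .. } => { },
				_ => panic!("Unexpected event"),
			}
		}
	}

	macro_rules! expect_pending_htlcs_forwardable {
		($node: expr, $events: expr) => {
			expect_pending_htlcs_forwardable_ignore!($node, $events);
			$node.node.process_pending_htlc_forwards();
		}
	}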
@@ -2501,6 +2555,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 5);
        assert_eq!(node_txn[0], node_txn[3]);
@@ -2597,6 +2652,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 2);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2 * RBF bumps of above HTLC txn)
        let commitment_spend =
@@ -2632,7 +2688,7 @@ fn test_htlc_on_chain_success() {
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 2);
+       assert_eq!(events.len(), 3);
        let mut first_claimed = false;
        for event in events {
                match event {
@@ -2644,6 +2700,7 @@ fn test_htlc_on_chain_success() {
                                        assert_eq!(payment_preimage, our_payment_preimage_2);
                                }
                        },
+                       Event::ChannelClosed { .. } => {},
                        _ => panic!("Unexpected event"),
                }
        }
@@ -2682,7 +2739,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.fail_htlc_backwards(&payment_hash);
        check_added_monitors!(nodes[2], 0);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
        check_added_monitors!(nodes[2], 1);
 
        let events = nodes[2].node.get_and_clear_pending_msg_events();
@@ -2700,6 +2758,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan_2.3);
@@ -2709,6 +2768,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fails it backward accordingly
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1);
        let timeout_tx;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -2752,7 +2812,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
                }
        }
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -2776,6 +2837,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], chan_1.3);
@@ -2814,11 +2876,13 @@ fn test_simple_commitment_revoked_fail_backward() {
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
+       check_closed_event!(nodes[1], 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -2881,7 +2945,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
        assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -2894,7 +2959,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        // Drop the last RAA from 3 -> 2
 
        assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -2911,7 +2977,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        check_added_monitors!(nodes[2], 1);
 
        assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       let events = nodes[2].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[2], events);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -2963,15 +3030,19 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
        match events[0] {
+               Event::ChannelClosed { .. } => { },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
                Event::PaymentFailed { ref payment_hash, .. } => {
                        assert_eq!(*payment_hash, fourth_payment_hash);
                },
                _ => panic!("Unexpected event"),
        }
        if !deliver_bs_raa {
-               match events[1] {
+               match events[2] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
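
With the closure notification in place, nodes[1]'s queue now leads with ChannelClosed, pushing PaymentFailed (and, when deliver_bs_raa is false, PendingHTLCsForwardable) down one slot. The same positional checks can be written compactly; illustrative only, assuming Rust's matches! macro:

	assert!(matches!(events[0], Event::ChannelClosed { .. }));
	assert!(matches!(events[1], Event::PaymentFailed { ref payment_hash, .. }
		if *payment_hash == fourth_payment_hash));
	if !deliver_bs_raa {
		assert!(matches!(events[2], Event::PendingHTLCsForwardable { .. }));
	}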
@@ -3131,9 +3202,13 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
-
+       let events = nodes[0].node.get_and_clear_pending_events();
        // Check that Alice fails backward the pending HTLC from the second payment.
-       expect_payment_failed!(nodes[0], failed_payment_hash, true);
+       expect_payment_failed!(nodes[0], events[0..1].to_vec(), failed_payment_hash, true);
+       match events[1] {
+               Event::ChannelClosed { .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 }
@@ -3153,6 +3228,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3);
@@ -3162,6 +3238,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@ -3196,7 +3273,8 @@ fn test_force_close_fail_back() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -3216,6 +3294,7 @@ fn test_force_close_fail_back() {
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -3230,6 +3309,7 @@ fn test_force_close_fail_back() {
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@ -3267,7 +3347,8 @@ fn test_dup_events_on_peer_disconnect() {
        check_added_monitors!(nodes[1], 1);
        let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
-       expect_payment_sent!(nodes[0], payment_preimage);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_sent!(nodes[0], payment_preimage, events);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -3843,7 +3924,8 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
@@ -3912,7 +3994,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                connect_block(&nodes[1], &block);
        }
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        check_added_monitors!(nodes[1], 1);
        let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -3926,7 +4009,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
        // 100_000 msat as u64, followed by the height at which we failed back above
        let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec();
        expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(block_count - 1));
-       expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
 }
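
The failure data for 0x4000 | 15 (PERM | incorrect_or_unknown_payment_details in BOLT 4 terms) is the expected amount as a big-endian u64 followed by the failing height as a big-endian u32. A worked example with an assumed height of 103:

	// 100_000 msat = 0x0001_86a0, height 103 = 0x67
	let mut data = byte_utils::be64_to_array(100_000).to_vec();
	data.extend_from_slice(&byte_utils::be32_to_array(103));
	assert_eq!(data, vec![0, 0, 0, 0, 0, 0x01, 0x86, 0xa0, 0, 0, 0, 0x67]);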
 
 #[test]
@@ -3971,7 +4055,8 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
                let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                check_added_monitors!(nodes[1], 0);
        } else {
                let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
@@ -3986,7 +4071,8 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
        connect_blocks(&nodes[1], 1);
 
        if forwarded_htlc {
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                check_added_monitors!(nodes[1], 1);
                let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(fail_commit.len(), 1);
@@ -3999,7 +4085,8 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
                }
                expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
        } else {
-               expect_payment_failed!(nodes[1], second_payment_hash, true);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[1], events, second_payment_hash, true);
        }
 }
 
@@ -4143,6 +4230,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -4160,6 +4248,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 
        header.prev_blockhash = nodes[0].best_block_hash();
@@ -4174,7 +4263,8 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        header.prev_blockhash = nodes[0].best_block_hash();
        let claim_block = Block { header, txdata: claim_txn};
        connect_block(&nodes[0], &claim_block);
-       expect_payment_sent!(nodes[0], payment_preimage);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_sent!(nodes[0], payment_preimage, events);
 
        // ChannelManagers generally get re-serialized after any relevant event(s). Since we just
        // connected a highly-relevant block, it likely gets serialized out now.
@@ -4567,6 +4657,7 @@ fn test_claim_sizeable_push_msat() {
        nodes[1].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4595,6 +4686,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        nodes[0].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
@@ -4604,6 +4696,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4631,6 +4724,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        mine_transaction(&nodes[1], &node_txn[0]);
@@ -4683,6 +4777,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        check_spends!(node_txn[2], node_txn[1]);
 
        mine_transaction(&nodes[1], &node_txn[0]);
+       check_closed_event!(nodes[1], 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4727,8 +4822,10 @@ fn test_static_spendable_outputs_timeout_tx() {
        assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[1]);
+       check_closed_event!(nodes[1], 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       expect_payment_failed!(nodes[1], our_payment_hash, true);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[1], events, our_payment_hash, true);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
        assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output
@@ -4757,6 +4854,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@ -4793,6 +4891,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -4808,6 +4907,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
@@ -4864,6 +4964,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 
        assert_eq!(revoked_htlc_txn.len(), 2);
@@ -4880,6 +4981,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
@@ -4960,6 +5062,7 @@ fn test_onchain_to_onchain_claim() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
+       check_closed_event!(nodes[2], 1);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 3);
@@ -4984,6 +5087,8 @@ fn test_onchain_to_onchain_claim() {
                check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
                b_txn.clear();
        }
+       check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
        check_added_monitors!(nodes[1], 1);
@@ -5008,6 +5113,7 @@ fn test_onchain_to_onchain_claim() {
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
+       check_closed_event!(nodes[1], 1);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
        assert_eq!(b_txn.len(), 3);
@@ -5065,6 +5171,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout_tx;
@@ -5091,6 +5198,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        nodes[2].node.claim_funds(our_payment_preimage);
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
+       check_closed_event!(nodes[2], 1);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5116,7 +5224,8 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        mine_transaction(&nodes[1], &htlc_timeout_tx);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -5178,6 +5287,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        check_added_monitors!(nodes[1], 1);
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5291,7 +5401,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
        assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
        check_added_monitors!(nodes[4], 0);
-       expect_pending_htlcs_forwardable!(nodes[4]);
+       let events = nodes[4].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[4], events);
        check_added_monitors!(nodes[4], 1);
 
        let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
@@ -5305,7 +5416,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
        assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
        check_added_monitors!(nodes[5], 0);
-       expect_pending_htlcs_forwardable!(nodes[5]);
+       let events = nodes[5].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[5], events);
        check_added_monitors!(nodes[5], 1);
 
        let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
@@ -5315,7 +5427,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
 
-       expect_pending_htlcs_forwardable!(nodes[3]);
+       let events = nodes[3].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[3], events);
        check_added_monitors!(nodes[3], 1);
        let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
@@ -5345,12 +5458,51 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        if announce_latest {
                mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
+               let events = nodes[2].node.get_and_clear_pending_events();
+               if deliver_last_raa {
+                       assert_eq!(events.len(), 2);
+                       match events[1] {
+                               Event::ChannelClosed { .. } => {}
+                               _ => panic!("Unexpected event"),
+                       }
+                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
+                       check_closed_broadcast!(nodes[2], true);
+                       expect_pending_htlcs_forwardable!(nodes[2], events[0..1]);
+               } else {
+                       assert_eq!(events.len(), 1);
+                       match events[0] {
+                               Event::ChannelClosed { .. } => {}
+                               _ => panic!("Unexpected event"),
+                       }
+                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
+                       check_closed_broadcast!(nodes[2], true);
+                       let events = nodes[2].node.get_and_clear_pending_events();
+                       expect_pending_htlcs_forwardable!(nodes[2], events);
+               }
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
+               let events = nodes[2].node.get_and_clear_pending_events();
+               if deliver_last_raa {
+                       assert_eq!(events.len(), 2);
+                       match events[1] {
+                               Event::ChannelClosed { .. } => {}
+                               _ => panic!("Unexpected event"),
+                       }
+                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
+                       check_closed_broadcast!(nodes[2], true);
+                       expect_pending_htlcs_forwardable!(nodes[2], events[0..1]);
+               } else {
+                       assert_eq!(events.len(), 1);
+                       match events[0] {
+                               Event::ChannelClosed { .. } => {}
+                               _ => panic!("Unexpected event"),
+                       }
+                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
+                       check_closed_broadcast!(nodes[2], true);
+                       let events = nodes[2].node.get_and_clear_pending_events();
+                       expect_pending_htlcs_forwardable!(nodes[2], events);
+               }
        }
-       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
-       check_closed_broadcast!(nodes[2], true);
-       expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 3);
 
        let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
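
The four branches added above share their bodies; only the mined commitment tx and the event count differ. A behavior-preserving condensation, offered as a sketch rather than what the patch applies:

	mine_transaction(&nodes[2], if announce_latest { &ds_last_commitment_tx[0] } else { &ds_prev_commitment_tx[0] });
	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), if deliver_last_raa { 2 } else { 1 });
	match events[events.len() - 1] {
		Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event"),
	}
	connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
	check_closed_broadcast!(nodes[2], true);
	if deliver_last_raa {
		expect_pending_htlcs_forwardable!(nodes[2], events[0..1]);
	} else {
		let events = nodes[2].node.get_and_clear_pending_events();
		expect_pending_htlcs_forwardable!(nodes[2], events);
	}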
@@ -5487,6 +5639,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
        let htlc_timeout = {
@@ -5501,7 +5654,8 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 
        mine_transaction(&nodes[0], &htlc_timeout);
        connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
-       expect_payment_failed!(nodes[0], our_payment_hash, true);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, our_payment_hash, true);
 
        // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5570,6 +5724,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -5581,7 +5736,8 @@ fn test_key_derivation_params() {
 
        mine_transaction(&nodes[0], &htlc_timeout);
        connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
-       expect_payment_failed!(nodes[0], our_payment_hash, true);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, our_payment_hash, true);
 
        // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
        let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
@@ -5610,6 +5766,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
+       check_closed_event!(nodes[0], 1);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5617,6 +5774,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
+       check_closed_event!(nodes[1], 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -5667,6 +5825,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5699,6 +5858,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5715,7 +5875,8 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        let htlc_value = if use_dust { 50000 } else { 3000000 };
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
        assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -5747,8 +5908,10 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1);
        } else {
-               expect_payment_failed!(nodes[0], our_payment_hash, true);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[0], events, our_payment_hash, true);
        }
 }
 
@@ -6143,7 +6306,8 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
@@ -6286,6 +6450,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -6341,7 +6506,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000);
        }
        let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
@@ -6413,6 +6579,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -6449,6 +6616,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -6493,6 +6661,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -6518,6 +6687,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -6543,6 +6713,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -6592,6 +6763,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -6625,6 +6797,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -6658,6 +6831,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -6691,6 +6865,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -6732,6 +6907,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -6773,6 +6949,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -6821,6 +6998,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -6851,7 +7029,8 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
        check_added_monitors!(nodes[1], 1);
@@ -6884,7 +7063,8 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
 
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_4.len(), 1);
 
@@ -6928,7 +7108,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
        assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
        check_added_monitors!(nodes[1], 0);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
 
        let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -6963,6 +7144,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
@@ -7023,8 +7205,10 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
+               check_closed_event!(nodes[0], 1);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[0], dust_hash, true);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[0], events, dust_hash, true);
 
                connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
                check_closed_broadcast!(nodes[0], true);
@@ -7036,23 +7220,27 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                mine_transaction(&nodes[0], &timeout_tx[0]);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[0], non_dust_hash, true);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_failed!(nodes[0], events, non_dust_hash, true);
        } else {
                // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
                if !revoked {
-                       expect_payment_failed!(nodes[0], dust_hash, true);
+                       let events = nodes[0].node.get_and_clear_pending_events();
+                       expect_payment_failed!(nodes[0], events, dust_hash, true);
                        assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
                        // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
                        mine_transaction(&nodes[0], &timeout_tx[0]);
                        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-                       expect_payment_failed!(nodes[0], non_dust_hash, true);
+                       let events = nodes[0].node.get_and_clear_pending_events();
+                       expect_payment_failed!(nodes[0], events, non_dust_hash, true);
                } else {
                        // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
                        // commitment tx
@@ -7131,6 +7319,7 @@ fn test_user_configurable_csv_delay() {
                        _ => { assert!(false); }
                }
        } else { assert!(false); }
+       check_closed_event!(nodes[0], 1);
 
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
@@ -7243,10 +7432,10 @@ fn test_data_loss_protect() {
 
        // Check we close channel detecting A is fallen-behind
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+       check_closed_event!(nodes[1], 1);
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
        check_added_monitors!(nodes[1], 1);
 
-
        // Check A is able to claim to_remote output
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
@@ -7254,6 +7443,7 @@ fn test_data_loss_protect() {
        assert_eq!(node_txn[0].output.len(), 2);
        mine_transaction(&nodes[0], &node_txn[0]);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       check_closed_event!(nodes[0], 1);
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
@@ -7287,8 +7477,10 @@ fn test_check_htlc_underpaying() {
 
        // Note that we first have to wait a random delay before processing the receipt of the HTLC,
        // and then will wait a second random delay before failing the HTLC back:
-       expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
 
        // The recipient is expecting a payment of 100_000 but received 10_000,
        // so it should fail the HTLC as if we didn't know the preimage.
@@ -7315,7 +7507,8 @@ fn test_check_htlc_underpaying() {
        // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
        let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec();
        expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(CHAN_CONFIRM_DEPTH));
-       expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
 }
 
 #[test]
@@ -7696,6 +7889,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7717,7 +7911,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable_ignore!(nodes[0], events[0..1]);
+       match events[1] {
+               Event::ChannelClosed { .. } => {}
+               _ => panic!("Unexpected event"),
+       }
        let first;
        let feerate_1;
        let penalty_txn;
@@ -7963,6 +8162,7 @@ fn test_counterparty_raa_skip_no_crash() {
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1);
 }
 
 #[test]
@@ -7989,12 +8189,14 @@ fn test_bump_txn_sanitize_tracking_maps() {
 
        // Broadcast set of revoked txn on A
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable_ignore!(nodes[0], events);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
 
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
@@ -8108,7 +8310,8 @@ fn test_preimage_storage() {
        }
        // Note that after leaving the above scope we have no knowledge of any arguments or return
        // values from previous calls.
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -8179,7 +8382,8 @@ fn test_secret_timeout() {
        }
        // Note that after leaving the above scope we have no knowledge of any arguments or return
        // values from previous calls.
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -8223,8 +8427,10 @@ fn test_bad_secret_hash() {
 
                        // We have to forward pending HTLCs once to process the receipt of the HTLC and then
                        // again to process the pending backwards-failure of the HTLC
-                       expect_pending_htlcs_forwardable!(nodes[1]);
-                       expect_pending_htlcs_forwardable!(nodes[1]);
+                       let events = nodes[1].node.get_and_clear_pending_events();
+                       expect_pending_htlcs_forwardable!(nodes[1], events);
+                       let events = nodes[1].node.get_and_clear_pending_events();
+                       expect_pending_htlcs_forwardable!(nodes[1], events);
                        check_added_monitors!(nodes[1], 1);
 
                        // We should fail the payment back
@@ -8246,17 +8452,20 @@ fn test_bad_secret_hash() {
        // Send a payment with the right payment hash but the wrong payment secret
        nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
        handle_unknown_invalid_payment_data!();
-       expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, our_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash, but the right payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
        handle_unknown_invalid_payment_data!();
-       expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, random_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash and random payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
        handle_unknown_invalid_payment_data!();
-       expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, random_payment_hash, true, expected_error_code, expected_error_data);
 }
 
 #[test]
@@ -8478,6 +8687,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
+       check_closed_event!(nodes[0], 1);
 }
 
 #[test]
@@ -8512,6 +8722,7 @@ fn test_htlc_no_detection() {
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
 
        let htlc_timeout = {
@@ -8525,7 +8736,8 @@ fn test_htlc_no_detection() {
        let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-       expect_payment_failed!(nodes[0], our_payment_hash, true);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, our_payment_hash, true);
 }
 
 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
@@ -8571,6 +8783,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
+       check_closed_event!(nodes[force_closing_node], 1);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
@@ -8582,6 +8795,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1);
                }
                assert_eq!(bob_txn.len(), 1);
                check_spends!(bob_txn[0], chan_ab.3);
@@ -8662,6 +8876,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
+                       check_closed_event!(nodes[1], 1);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@ -8877,6 +9092,7 @@ fn test_error_chans_closed() {
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
+       check_closed_event!(nodes[0], 1);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -8886,6 +9102,7 @@ fn test_error_chans_closed() {
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
+       check_closed_event!(nodes[0], 2);
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@ -8950,6 +9167,7 @@ fn test_invalid_funding_tx() {
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
        confirm_transaction_at(&nodes[1], &tx, 1);
+       check_closed_event!(nodes[1], 1);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -8993,6 +9211,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        nodes[1].node.force_close_channel(&channel_id).unwrap();
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -9024,7 +9243,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
                        &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
                check_added_monitors!(nodes[1], 1);
 
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
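The check_closed_event! macro and the two-argument forms of expect_pending_htlcs_forwardable!, expect_payment_failed! and expect_payment_sent! used throughout these hunks are defined in lightning/src/ln/functional_test_utils.rs, which is not excerpted here. A plausible reconstruction of the two most common ones, assuming the usual test-node plumbing (the $count parameter name and the exact assertions are guesses, not the committed definitions):

    macro_rules! check_closed_event {
        ($node: expr, $count: expr) => {{
            // Drain pending events and require exactly $count ChannelClosed entries.
            let events = $node.node.get_and_clear_pending_events();
            assert_eq!(events.len(), $count);
            for event in events {
                match event {
                    Event::ChannelClosed { .. } => {},
                    _ => panic!("Unexpected event"),
                }
            }
        }}
    }

    macro_rules! expect_pending_htlcs_forwardable {
        ($node: expr, $events: expr) => {{
            // The caller drains the queue first, so it can strip e.g. a leading
            // ChannelClosed event before asserting on PendingHTLCsForwardable.
            assert_eq!($events.len(), 1);
            match $events[0] {
                Event::PendingHTLCsForwardable { .. } => {},
                _ => panic!("Unexpected event"),
            };
            $node.node.process_pending_htlc_forwards();
        }}
    }

Moving the event draining to the call site explains the mechanical rewrites above: each one-argument call becomes a get_and_clear_pending_events() followed by the two-argument macro, leaving room for the new ChannelClosed events to be checked alongside.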
index c621f4eba22327b36602526c70d234f5b6426b72..237e62324d119e6d8e6d79d9d7a73ecc1692903e 100644 (file)
@@ -65,7 +65,8 @@ fn chanmon_fail_from_stale_commitment() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);
 
@@ -77,7 +78,8 @@ fn chanmon_fail_from_stale_commitment() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       expect_pending_htlcs_forwardable!(nodes[1], events);
        check_added_monitors!(nodes[1], 1);
        let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
@@ -247,7 +249,8 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        if prev_commitment_tx {
                // To build a previous commitment transaction, deliver one round of commitment messages.
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.update_fulfill_htlcs[0]);
-               expect_payment_sent!(nodes[0], payment_preimage);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_sent!(nodes[0], payment_preimage, events);
                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed);
                check_added_monitors!(nodes[0], 1);
                let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -357,7 +360,8 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                sorted_vec(nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()));
 
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-       expect_payment_failed!(nodes[0], dust_payment_hash, true);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, dust_payment_hash, true);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        // After ANTI_REORG_DELAY, A will consider its balance fully spendable and generate a
@@ -397,7 +401,8 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        // possibly-claimable up to ANTI_REORG_DELAY, at which point it will drop it.
        mine_transaction(&nodes[0], &b_broadcast_txn[0]);
        if !prev_commitment_tx {
-               expect_payment_sent!(nodes[0], payment_preimage);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_sent!(nodes[0], payment_preimage, events);
        }
        assert_eq!(sorted_vec(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
                        claimable_amount_satoshis: 3_000,
@@ -443,7 +448,8 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        assert_eq!(Vec::<Balance>::new(),
                nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances());
-       expect_payment_failed!(nodes[0], timeout_payment_hash, true);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       expect_payment_failed!(nodes[0], events, timeout_payment_hash, true);
 
        let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
        assert_eq!(node_a_spendable.len(), 1);
index 70d5a0c0bdfe30362668824ed6e77e0e267755a0..814782e59e4836f18e96197e86145484fa39014e 100644 (file)
@@ -122,7 +122,8 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
                                expect_htlc_forward!(&nodes[2]);
                                expect_event!(&nodes[2], Event::PaymentReceived);
                                callback_node();
-                               expect_pending_htlcs_forwardable!(nodes[2]);
+                               let events = nodes[2].node.get_and_clear_pending_events();
+                               expect_pending_htlcs_forwardable!(nodes[2], events);
                        }
 
                        let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
index 0db30b8178cdc6bb8dc6113168d93d56ecb7f762..da43773c9803ee8113409786055107e4d52c2541 100644 (file)
@@ -81,6 +81,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+               check_closed_event!(nodes[2], 1);
                let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
@@ -122,6 +123,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        };
        check_added_monitors!(nodes[1], 1);
       check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+       check_closed_event!(nodes[1], 1);
        // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
        check_added_monitors!(nodes[1], 0);
@@ -147,7 +149,8 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                        txdata: vec![],
                };
                connect_block(&nodes[1], &block);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable!(nodes[1], events);
        }
 
        check_added_monitors!(nodes[1], 1);
@@ -162,7 +165,8 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        }
        commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false, true);
        if claim {
-               expect_payment_sent!(nodes[0], our_payment_preimage);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               expect_payment_sent!(nodes[0], our_payment_preimage, events);
        } else {
                expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
        }
@@ -298,6 +302,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
        *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
        nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1);
+       check_closed_event!(nodes[1], 1);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
@@ -369,6 +375,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node A commitment transaction
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(nodes[0], 1);
        check_added_monitors!(nodes[0], 1);
        // Verify node A broadcast tx claiming both HTLCs
        {
@@ -386,6 +393,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node B
        connect_blocks(&nodes[1], 135);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1);
        check_added_monitors!(nodes[1], 1);
        // Verify node B broadcast 2 HTLC-timeout txn
        let partial_claim_tx = {
index c6691826ee6457615d6597fcfb5f899c3d79e1fe..abd8f6b105fcdda900518efbd3c59ea2c52c14cf 100644 (file)
@@ -16,6 +16,7 @@
 
 use chain::keysinterface::SpendableOutputDescriptor;
 use ln::msgs;
+use ln::msgs::DecodeError;
 use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 use routing::network_graph::NetworkUpdate;
 use util::ser::{Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
@@ -26,7 +27,6 @@ use bitcoin::secp256k1::key::PublicKey;
 
 use io;
 use prelude::*;
-use core::cmp;
 use core::time::Duration;
 use core::ops::Deref;
 
@@ -69,6 +69,47 @@ pub enum PaymentPurpose {
        SpontaneousPayment(PaymentPreimage),
 }
 
+#[derive(Clone, Debug)]
+pub enum ClosureDescriptor {
+       ForceClosed,
+       UserInitiated,
+       CounterpartyInitiated,
+       CooperativeClosure,
+       UnknownOnchainCommitment,
+       ProcessingError,
+       DisconnectedPeer,
+}
+
+impl Writeable for ClosureDescriptor {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               match self {
+                       ClosureDescriptor::ForceClosed => 0u8.write(writer)?,
+                       ClosureDescriptor::UserInitiated => 1u8.write(writer)?,
+                       ClosureDescriptor::CounterpartyInitiated => 2u8.write(writer)?,
+                       ClosureDescriptor::CooperativeClosure => 3u8.write(writer)?,
+                       ClosureDescriptor::UnknownOnchainCommitment => 4u8.write(writer)?,
+                       ClosureDescriptor::ProcessingError => 5u8.write(writer)?,
+                       ClosureDescriptor::DisconnectedPeer => 6u8.write(writer)?,
+               }
+               Ok(())
+       }
+}
+
+impl Readable for ClosureDescriptor {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               Ok(match <u8 as Readable>::read(reader)? {
+                       0 => ClosureDescriptor::ForceClosed,
+                       1 => ClosureDescriptor::UserInitiated,
+                       2 => ClosureDescriptor::CounterpartyInitiated,
+                       3 => ClosureDescriptor::CooperativeClosure,
+                       4 => ClosureDescriptor::UnknownOnchainCommitment,
+                       5 => ClosureDescriptor::ProcessingError,
+                       6 => ClosureDescriptor::DisconnectedPeer,
+                       _ => return Err(DecodeError::InvalidValue),
+               })
+       }
+}
+
 /// An Event which you should probably take some action in response to.
 ///
 /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
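The new enum serializes as a single discriminant byte. Below is a self-contained sketch of the scheme using plain std::io in place of the crate's Writer/Readable traits (illustrative only; the real code above returns DecodeError::InvalidValue for unknown bytes):

    use std::io::{self, Read, Write};

    // Standalone mirror of ClosureDescriptor's one-byte encoding.
    #[derive(Clone, Debug, PartialEq)]
    enum ClosureDescriptor {
        ForceClosed,
        UserInitiated,
        CounterpartyInitiated,
        CooperativeClosure,
        UnknownOnchainCommitment,
        ProcessingError,
        DisconnectedPeer,
    }

    impl ClosureDescriptor {
        // Serialize as the single discriminant byte used in the diff above.
        fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
            let byte = match self {
                ClosureDescriptor::ForceClosed => 0u8,
                ClosureDescriptor::UserInitiated => 1,
                ClosureDescriptor::CounterpartyInitiated => 2,
                ClosureDescriptor::CooperativeClosure => 3,
                ClosureDescriptor::UnknownOnchainCommitment => 4,
                ClosureDescriptor::ProcessingError => 5,
                ClosureDescriptor::DisconnectedPeer => 6,
            };
            w.write_all(&[byte])
        }

        // Deserialize, rejecting unknown discriminants much as the real
        // Readable impl does with DecodeError::InvalidValue.
        fn read<R: Read>(r: &mut R) -> io::Result<Self> {
            let mut buf = [0u8; 1];
            r.read_exact(&mut buf)?;
            Ok(match buf[0] {
                0 => ClosureDescriptor::ForceClosed,
                1 => ClosureDescriptor::UserInitiated,
                2 => ClosureDescriptor::CounterpartyInitiated,
                3 => ClosureDescriptor::CooperativeClosure,
                4 => ClosureDescriptor::UnknownOnchainCommitment,
                5 => ClosureDescriptor::ProcessingError,
                6 => ClosureDescriptor::DisconnectedPeer,
                _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown ClosureDescriptor")),
            })
        }
    }

    fn main() -> io::Result<()> {
        // Round-trip every variant through a byte buffer.
        for variant in vec![ClosureDescriptor::ForceClosed, ClosureDescriptor::CooperativeClosure,
                            ClosureDescriptor::ProcessingError, ClosureDescriptor::DisconnectedPeer] {
            let mut bytes = Vec::new();
            variant.write(&mut bytes)?;
            assert_eq!(variant, ClosureDescriptor::read(&mut &bytes[..])?);
        }
        Ok(())
    }

The fixed one-byte layout keeps the event forward-parseable: old readers hit InvalidValue on a new discriminant instead of misinterpreting a length-prefixed string.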
@@ -195,8 +236,8 @@ pub enum Event {
                /// The channel_id of the channel which has been closed to further off-chain
                /// updates, though its funding output might not yet be resolved on-chain.
                channel_id: [u8; 32],
-               /// A human-readable error message
-               err: String
+               /// The reason the channel was closed, in machine-readable form
+               err: ClosureDescriptor
        }
 }
 
@@ -277,8 +318,7 @@ impl Writeable for Event {
                        &Event::ChannelClosed { ref channel_id, ref err } => {
                                6u8.write(writer)?;
                                channel_id.write(writer)?;
-                               (err.len() as u16).write(writer)?;
-                               writer.write_all(err.as_bytes())?;
+                               err.write(writer)?;
                                write_tlv_fields!(writer, {});
                        },
                }
@@ -399,18 +439,9 @@ impl MaybeReadable for Event {
                        6u8 => {
                                let f = || {
                                        let channel_id = Readable::read(reader)?;
-                                       let err = {
-                                               let mut size: usize = <u16 as Readable>::read(reader)? as usize;
-                                               let mut data = vec![];
-                                               let data_len = reader.read_to_end(&mut data)?;
-                                               size = cmp::min(data_len, size);
-                                               match String::from_utf8(data[..size as usize].to_vec()) {
-                                                       Ok(s) => s,
-                                                       Err(_) => return Err(msgs::DecodeError::InvalidValue),
-                                               }
-                                       };
+                                       let err = Readable::read(reader)?;
                                        read_tlv_fields!(reader, {});
-                                       Ok(Some(Event::ChannelClosed { channel_id, err: err }))
+                                       Ok(Some(Event::ChannelClosed { channel_id, err }))
                                };
                                f()
                        },