git.bitcoin.ninja Git - rust-lightning/commitdiff
f drop spurious test changes
author Matt Corallo <git@bluematt.me>
Tue, 21 Sep 2021 04:54:02 +0000 (04:54 +0000)
committer Matt Corallo <git@bluematt.me>
Tue, 21 Sep 2021 05:48:46 +0000 (05:48 +0000)
lightning-persister/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/reorg_tests.rs
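
The recurring change across every file in this commit: the expect_payment_sent!/
expect_payment_failed! test macros now drain the node's pending events themselves, so call
sites stop fetching and passing an event Vec. A representative before/after, lifted from the
hunks below (nodes[0] and payment_preimage come from the surrounding test harness):

	// Before: each call site drained events and handed them to the macro.
	let events = nodes[0].node.get_and_clear_pending_events();
	expect_payment_sent!(nodes[0], payment_preimage, events);

	// After: the macro calls get_and_clear_pending_events() internally,
	// asserting exactly one Event::PaymentSent with the expected preimage.
	expect_payment_sent!(nodes[0], payment_preimage);

The check_closed_event! assertions added alongside consume the Event::ChannelClosed entries
that the old call sites cleared manually, which is why the bare
get_and_clear_pending_events() calls are dropped.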

diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs
index 0a813bf079d1e22b8f1d229db15387f9572979c7..b0707a4e618fb15c53a820c674bd1b45c1922f5a 100644
@@ -182,11 +182,11 @@ mod tests {
        use bitcoin::Txid;
        use lightning::chain::channelmonitor::{Persist, ChannelMonitorUpdateErr};
        use lightning::chain::transaction::OutPoint;
-       use lightning::{check_closed_broadcast, check_added_monitors};
+       use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
        use lightning::ln::features::InitFeatures;
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::ErrorAction;
-       use lightning::util::events::{MessageSendEventsProvider, MessageSendEvent};
+       use lightning::util::events::{ClosureReason, Event, MessageSendEventsProvider, MessageSendEvent};
        use lightning::util::test_utils;
        use std::fs;
        #[cfg(target_os = "windows")]
@@ -259,6 +259,7 @@ mod tests {
                // Force close because cooperative close doesn't result in any persisted
                // updates.
                nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 
@@ -268,12 +269,11 @@ mod tests {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
                check_closed_broadcast!(nodes[1], true);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxBroadcasted);
                check_added_monitors!(nodes[1], 1);
 
                // Make sure everything is persisted as expected after close.
                check_persisted_data!(11);
-               nodes[0].node.get_and_clear_pending_events();
-               nodes[1].node.get_and_clear_pending_events();
        }
 
        // Test that if the persister's path to channel data is read-only, writing a
@@ -293,6 +293,7 @@ mod tests {
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
                nodes[1].node.force_close_channel(&chan.2).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 
                // Set the persister's directory to read-only, which should result in
@@ -314,7 +315,6 @@ mod tests {
 
                nodes[1].node.get_and_clear_pending_msg_events();
                added_monitors.clear();
-               nodes[1].node.get_and_clear_pending_events();
        }
 
        // Test that if a persister's directory name is invalid, monitor persistence
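
check_closed_event! is invoked above as (node, expected event count, expected ClosureReason),
but its definition is not part of this diff; a minimal sketch of its assumed shape, inferred
from those call sites:

	macro_rules! check_closed_event {
		($node: expr, $events: expr, $reason: expr) => {{
			// Drain pending events and require exactly $events of them,
			// each an Event::ChannelClosed carrying the expected reason.
			let events = $node.node.get_and_clear_pending_events();
			assert_eq!(events.len(), $events);
			let expected_reason = $reason;
			for event in events {
				match event {
					Event::ChannelClosed { ref reason, .. } => {
						assert_eq!(*reason, expected_reason);
					},
					_ => panic!("Unexpected event"),
				}
			}
		}}
	}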
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index eed7beea27448f8fe3fe8b9d3569e6cb3b288714..36f226823945b3a4cc5f0e07485788198b7719d8 100644
@@ -763,6 +763,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
@@ -918,6 +919,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
@@ -1007,8 +1009,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
        commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, payment_hash_1, true);
+       expect_payment_failed!(nodes[0], payment_hash_1, true);
 
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
        let as_cs;
@@ -1716,8 +1717,7 @@ fn test_monitor_update_fail_claim() {
        let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_sent!(nodes[0], payment_preimage_1, events);
+       expect_payment_sent!(nodes[0], payment_preimage_1);
 
        // Get the payment forwards, note that they were batched into one commitment update.
        expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1928,6 +1928,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
+       assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
        let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
@@ -2413,8 +2414,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
                        assert!(updates.update_fee.is_none());
                        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
                        nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-                       let events = nodes[1].node.get_and_clear_pending_events();
-                       expect_payment_sent!(nodes[1], payment_preimage_0, events);
+                       expect_payment_sent!(nodes[1], payment_preimage_0);
                        assert_eq!(updates.update_add_htlcs.len(), 1);
                        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
                        updates.commitment_signed
@@ -2520,8 +2520,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
                bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
                assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_sent!(nodes[0], payment_preimage, events);
+               expect_payment_sent!(nodes[0], payment_preimage);
                if htlc_status == HTLCStatusAtDupClaim::Cleared {
                        commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
                }
@@ -2547,8 +2546,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
                bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
                assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_sent!(nodes[0], payment_preimage, events);
+               expect_payment_sent!(nodes[0], payment_preimage);
        }
        if htlc_status != HTLCStatusAtDupClaim::Cleared {
                commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
@@ -2715,8 +2713,7 @@ fn double_temp_error() {
        assert_eq!(node_id, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
        check_added_monitors!(nodes[0], 0);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_sent!(nodes[0], payment_preimage_1, events);
+       expect_payment_sent!(nodes[0], payment_preimage_1);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
        check_added_monitors!(nodes[0], 1);
        nodes[0].node.process_pending_htlc_forwards();
@@ -2759,6 +2756,5 @@ fn double_temp_error() {
        check_added_monitors!(nodes[0], 0);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_sent!(nodes[0], payment_preimage_2, events);
+       expect_payment_sent!(nodes[0], payment_preimage_2);
 }
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 6ab97c2420e0fd1bab7da472de68ff4d27ff684a..734466404eb0b675ad2c20cf76c97b31642bd61f 100644
@@ -5711,8 +5711,7 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[0], events, our_payment_hash, true);
+               expect_payment_failed!(nodes[0], our_payment_hash, true);
 
                // Send the second half of the original MPP payment.
                nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
@@ -5766,7 +5765,6 @@ mod tests {
 
        #[test]
        fn test_keysend_dup_payment_hash() {
-
                // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
                //      outbound regular payment fails as expected.
                // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
@@ -5804,8 +5802,7 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[0], events, payment_hash, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
 
                // Finally, claim the original payment.
                claim_payment(&nodes[0], &expected_route, payment_preimage);
@@ -5843,8 +5840,7 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[0], events, payment_hash, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
 
                // Finally, succeed the keysend payment.
                claim_payment(&nodes[0], &expected_route, payment_preimage);
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 1ab91cfb2d64b4e22faa06fdad7bdad81adaf085..a8b91735c7a9d7fe28b0bd48f13fc865037b4681 100644
@@ -1038,9 +1038,10 @@ macro_rules! expect_payment_received {
 }
 
 macro_rules! expect_payment_sent {
-       ($node: expr, $expected_payment_preimage: expr, $events: expr) => {
-               assert_eq!($events.len(), 1);
-               match $events[0] {
+       ($node: expr, $expected_payment_preimage: expr) => {
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
                        Event::PaymentSent { ref payment_preimage } => {
                                assert_eq!($expected_payment_preimage, *payment_preimage);
                        },
@@ -1094,8 +1095,8 @@ macro_rules! expect_payment_failed_with_update {
 
 #[cfg(test)]
 macro_rules! expect_payment_failed {
-       ($node: expr, $events: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => {
-               let events: Vec<Event> = $events;
+       ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => {
+               let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
                        Event::PaymentFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => {
@@ -1273,8 +1274,7 @@ pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, exp
                }
        }
        if !skip_last {
-               let events = origin_node.node.get_and_clear_pending_events();
-               expect_payment_sent!(origin_node, our_payment_preimage, events);
+               expect_payment_sent!(origin_node, our_payment_preimage);
        }
 }
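
The optional trailing arguments on expect_payment_failed! survive the signature change: when
an error code and error data are supplied, the macro checks those too. For example, from
test_check_htlc_underpaying further down (0x4000 | 15 is the BOLT 4 PERM|15
incorrect_or_unknown_payment_details failure code):

	expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);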
 
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 3da0f08fe601a1a6f8fd185e3b44ff655343e3bb..c6db9817614a8258bd1088fbd263b9031dc140b1 100644
@@ -1926,8 +1926,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_sent!(nodes[0], payment_preimage_1, events);
+       expect_payment_sent!(nodes[0], payment_preimage_1);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
@@ -1956,8 +1955,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_sent!(nodes[0], payment_preimage_2, events);
+       expect_payment_sent!(nodes[0], payment_preimage_2);
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
@@ -2384,8 +2382,7 @@ fn claim_htlc_outputs_shared_tx() {
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxBroadcasted);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[1], events, payment_hash_2, true);
+               expect_payment_failed!(nodes[1], payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment
@@ -2444,13 +2441,12 @@ fn claim_htlc_outputs_single_tx() {
                let mut events = nodes[0].node.get_and_clear_pending_events();
                expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
                match events[1] {
-                       Event::ChannelClosed { .. } => {}
+                       Event::ChannelClosed { reason: ClosureReason::CommitmentTxBroadcasted, .. } => {}
                        _ => panic!("Unexpected event"),
                }
 
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[1], events, payment_hash_2, true);
+               expect_payment_failed!(nodes[1], payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 9);
@@ -3191,8 +3187,14 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
        let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
        // Check that Alice fails backward the pending HTLC from the second payment.
-       expect_payment_failed!(nodes[0], events[0..1].to_vec(), failed_payment_hash, true);
+       match events[0] {
+               Event::PaymentFailed { payment_hash, .. } => {
+                       assert_eq!(payment_hash, failed_payment_hash);
+               },
+               _ => panic!("Unexpected event"),
+       }
        match events[1] {
                Event::ChannelClosed { .. } => {}
                _ => panic!("Unexpected event"),
@@ -3334,8 +3336,7 @@ fn test_dup_events_on_peer_disconnect() {
        check_added_monitors!(nodes[1], 1);
        let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_sent!(nodes[0], payment_preimage, events);
+       expect_payment_sent!(nodes[0], payment_preimage);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -3994,8 +3995,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
        // 100_000 msat as u64, followed by the height at which we failed back above
        let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec();
        expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(block_count - 1));
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
+       expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
 }
 
 #[test]
@@ -4068,8 +4068,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
                }
                expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
        } else {
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[1], events, second_payment_hash, true);
+               expect_payment_failed!(nodes[1], second_payment_hash, true);
        }
 }
 
@@ -4246,8 +4245,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
        header.prev_blockhash = nodes[0].best_block_hash();
        let claim_block = Block { header, txdata: claim_txn};
        connect_block(&nodes[0], &claim_block);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_sent!(nodes[0], payment_preimage, events);
+       expect_payment_sent!(nodes[0], payment_preimage);
 
        // ChannelManagers generally get re-serialized after any relevant event(s). Since we just
        // connected a highly-relevant block, it likely gets serialized out now.
@@ -4807,8 +4805,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        mine_transaction(&nodes[1], &node_txn[1]);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxBroadcasted);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[1], events, our_payment_hash, true);
+       expect_payment_failed!(nodes[1], our_payment_hash, true);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
        assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output
@@ -5447,48 +5444,28 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        if announce_latest {
                mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
-               let events = nodes[2].node.get_and_clear_pending_events();
-               if deliver_last_raa {
-                       assert_eq!(events.len(), 2);
-                       match events[1] {
-                               Event::ChannelClosed { .. } => {}
-                               _ => panic!("Unexpected event"),
-                       }
-                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
-                       check_closed_broadcast!(nodes[2], true);
-                       expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
-               } else {
-                       assert_eq!(events.len(), 1);
-                       match events[0] {
-                               Event::ChannelClosed { .. } => {}
-                               _ => panic!("Unexpected event"),
-                       }
-                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
-                       check_closed_broadcast!(nodes[2], true);
-                       expect_pending_htlcs_forwardable!(nodes[2]);
-               }
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
-               let events = nodes[2].node.get_and_clear_pending_events();
-               if deliver_last_raa {
-                       assert_eq!(events.len(), 2);
-                       match events[1] {
-                               Event::ChannelClosed { .. } => {}
-                               _ => panic!("Unexpected event"),
-                       }
-                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
-                       check_closed_broadcast!(nodes[2], true);
-                       expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
-               } else {
-                       assert_eq!(events.len(), 1);
-                       match events[0] {
-                               Event::ChannelClosed { .. } => {}
-                               _ => panic!("Unexpected event"),
-                       }
-                       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
-                       check_closed_broadcast!(nodes[2], true);
-                       expect_pending_htlcs_forwardable!(nodes[2]);
-               }
+       }
+       let events = nodes[2].node.get_and_clear_pending_events();
+       let close_event = if deliver_last_raa {
+               assert_eq!(events.len(), 2);
+               events[1].clone()
+       } else {
+               assert_eq!(events.len(), 1);
+               events[0].clone()
+       };
+       match close_event {
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxBroadcasted, .. } => {}
+               _ => panic!("Unexpected event"),
+       }
+
+       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
+       check_closed_broadcast!(nodes[2], true);
+       if deliver_last_raa {
+               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+       } else {
+               expect_pending_htlcs_forwardable!(nodes[2]);
        }
        check_added_monitors!(nodes[2], 3);
 
@@ -5641,8 +5618,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 
        mine_transaction(&nodes[0], &htlc_timeout);
        connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, our_payment_hash, true);
+       expect_payment_failed!(nodes[0], our_payment_hash, true);
 
        // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5723,8 +5699,7 @@ fn test_key_derivation_params() {
 
        mine_transaction(&nodes[0], &htlc_timeout);
        connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, our_payment_hash, true);
+       expect_payment_failed!(nodes[0], our_payment_hash, true);
 
        // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
        let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
@@ -5896,8 +5871,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                check_added_monitors!(nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxBroadcasted);
        } else {
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[0], events, our_payment_hash, true);
+               expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
 }
 
@@ -7188,8 +7162,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
                check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxBroadcasted);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[0], events, dust_hash, true);
+               expect_payment_failed!(nodes[0], dust_hash, true);
 
                connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
                check_closed_broadcast!(nodes[0], true);
@@ -7201,8 +7174,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                mine_transaction(&nodes[0], &timeout_tx[0]);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_failed!(nodes[0], events, non_dust_hash, true);
+               expect_payment_failed!(nodes[0], non_dust_hash, true);
        } else {
                // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
@@ -7213,15 +7185,13 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
                if !revoked {
-                       let events = nodes[0].node.get_and_clear_pending_events();
-                       expect_payment_failed!(nodes[0], events, dust_hash, true);
+                       expect_payment_failed!(nodes[0], dust_hash, true);
                        assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
                        // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
                        mine_transaction(&nodes[0], &timeout_tx[0]);
                        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-                       let events = nodes[0].node.get_and_clear_pending_events();
-                       expect_payment_failed!(nodes[0], events, non_dust_hash, true);
+                       expect_payment_failed!(nodes[0], non_dust_hash, true);
                } else {
                        // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
                        // commitment tx
@@ -7486,8 +7456,7 @@ fn test_check_htlc_underpaying() {
        // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
        let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec();
        expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(CHAN_CONFIRM_DEPTH));
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
+       expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
 }
 
 #[test]
@@ -8426,20 +8395,17 @@ fn test_bad_secret_hash() {
        // Send a payment with the right payment hash but the wrong payment secret
        nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
        handle_unknown_invalid_payment_data!();
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, our_payment_hash, true, expected_error_code, expected_error_data);
+       expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash, but the right payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
        handle_unknown_invalid_payment_data!();
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, random_payment_hash, true, expected_error_code, expected_error_data);
+       expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash and random payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
        handle_unknown_invalid_payment_data!();
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, random_payment_hash, true, expected_error_code, expected_error_data);
+       expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
 }
 
 #[test]
@@ -8710,8 +8676,7 @@ fn test_htlc_no_detection() {
        let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, our_payment_hash, true);
+       expect_payment_failed!(nodes[0], our_payment_hash, true);
 }
 
 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 9e8d74b55e066e891a67e1e576e0c369e23b4931..98efa993184af6862894d0c2c8e09d27cb517427 100644
@@ -250,8 +250,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        if prev_commitment_tx {
                // To build a previous commitment transaction, deliver one round of commitment messages.
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.update_fulfill_htlcs[0]);
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_sent!(nodes[0], payment_preimage, events);
+               expect_payment_sent!(nodes[0], payment_preimage);
                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed);
                check_added_monitors!(nodes[0], 1);
                let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -363,8 +362,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                sorted_vec(nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()));
 
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, dust_payment_hash, true);
+       expect_payment_failed!(nodes[0], dust_payment_hash, true);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        // After ANTI_REORG_DELAY, A will consider its balance fully spendable and generate a
@@ -404,8 +402,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        // possibly-claimable up to ANTI_REORG_DELAY, at which point it will drop it.
        mine_transaction(&nodes[0], &b_broadcast_txn[0]);
        if !prev_commitment_tx {
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_sent!(nodes[0], payment_preimage, events);
+               expect_payment_sent!(nodes[0], payment_preimage);
        }
        assert_eq!(sorted_vec(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
                        claimable_amount_satoshis: 3_000,
@@ -451,8 +448,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        assert_eq!(Vec::<Balance>::new(),
                nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances());
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_payment_failed!(nodes[0], events, timeout_payment_hash, true);
+       expect_payment_failed!(nodes[0], timeout_payment_hash, true);
 
        let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
        assert_eq!(node_a_spendable.len(), 1);
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 54ec7b833bdb8e033ca513b177e8c7b0fd91af2b..834d9ca5320c59a25e9b65b5afdfcea1ea14face 100644
@@ -164,8 +164,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        }
        commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false, true);
        if claim {
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_payment_sent!(nodes[0], our_payment_preimage, events);
+               expect_payment_sent!(nodes[0], our_payment_preimage);
        } else {
                expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
        }