Rename PaymentReceived to PaymentClaimable
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 88185315148bc83ce3466b72a37ace7a5912913f..2c7e8e72bbee663fd824a7f3aad5a27b1c118792 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -637,7 +637,7 @@ fn test_forwardable_regen() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
+       expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 100_000);
        check_added_monitors!(nodes[1], 1);
 
        let mut events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -646,7 +646,7 @@ fn test_forwardable_regen() {
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
        expect_pending_htlcs_forwardable!(nodes[2]);
-       expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
+       expect_payment_claimable!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
@@ -656,7 +656,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        // Test what happens if a node receives an MPP payment, claims it, but crashes before
        // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
        // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
-       // have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the
+       // have the PaymentClaimable event, (b) have one (or two) channel(s) that goes on chain with the
        // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
        // not have the preimage tied to the still-pending HTLC.
        //
@@ -693,7 +693,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 2);
 
-       // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+       // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
        let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(send_events.len(), 2);
        do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
@@ -713,7 +713,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 
        let original_manager = nodes[3].node.encode();
 
-       expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+       expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000);
 
        nodes[3].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[3], 2);
@@ -750,11 +750,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
 
        // During deserialization, we should have closed one channel and broadcast its latest
-       // commitment transaction. We should also still have the original PaymentReceived event we
+       // commitment transaction. We should also still have the original PaymentClaimable event we
        // never finished processing.
        let events = nodes[3].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
-       if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
+       if let Event::PaymentClaimable { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
        if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
        if persist_both_monitors {
                if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
@@ -788,9 +788,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
                let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
                check_added_monitors!(nodes[3], 1);
                assert_eq!(ds_msgs.len(), 2);
-               if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+               if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
 
-               let cs_updates = match ds_msgs[0] {
+               let cs_updates = match ds_msgs[1] {
                        MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
                                nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
                                check_added_monitors!(nodes[2], 1);
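
For downstream consumers (not part of this diff): an application's event handler only needs the variant name updated, since the fields the tests above match on (payment_hash, amount_msat, purpose) are unchanged. A minimal sketch under those assumptions follows; the on_event function name is illustrative, and the lightning::util::events import path assumes the LDK version this diff targets.

use lightning::util::events::{Event, PaymentPurpose};

// Sketch only: handler shape and function name are illustrative, not from this diff.
fn on_event(event: &Event) {
	match event {
		// Was: Event::PaymentReceived { .. }; only the variant name changes.
		Event::PaymentClaimable { payment_hash, amount_msat, purpose, .. } => {
			if let PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), .. } = purpose {
				// A real handler would call ChannelManager::claim_funds(*preimage) here;
				// the claim is then surfaced as a separate PaymentClaimed event.
				let _ = (payment_hash, amount_msat, preimage);
			}
		},
		_ => {},
	}
}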