Correct `expect_payment_forwarded` upstream channel checking
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index a9a5790b06c428cd09ff74eecedc743c65949a08..3001485b09739f95b2dad0539b29d54cdfffecd0 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
-use crate::ln::channel::AnnouncementSigsState;
+use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use crate::util::test_channel_signer::TestChannelSigner;
@@ -92,7 +92,7 @@ fn test_monitor_and_persister_update_fail() {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
 
        // Route an HTLC from node 0 to node 1 (but don't settle)
-       let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+       let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
 
        // Make a copy of the ChainMonitor so we can capture the error it returns on a
        // bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor
@@ -136,15 +136,18 @@ fn test_monitor_and_persister_update_fail() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Check that even though the persister is returning a InProgress,
-                       // because the update is bogus, ultimately the error that's returned
-                       // should be a PermanentFailure.
-                       if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
-                       logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
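+               // `get_channel_ref!` yields a `ChannelPhase`; only a funded channel can process a
+               // `commitment_signed`, so match out the `Funded` variant first.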
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               // Check that even though the persister is returning an InProgress,
+                               // because the update is bogus, ultimately the error that's returned
+                               // should be a PermanentFailure.
+                               if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
+                               logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
 
        check_added_monitors!(nodes[0], 1);
@@ -283,7 +286,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -855,7 +858,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
        // Route a first payment that we'll fail backwards
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 
        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
        nodes[2].node.fail_htlc_backwards(&payment_hash_1);
@@ -1125,7 +1128,7 @@ fn test_monitor_update_fail_reestablish() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
@@ -1341,7 +1344,7 @@ fn claim_while_disconnected_monitor_update_fail() {
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        // Forward a payment for B to claim
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -1460,12 +1463,12 @@ fn monitor_failed_no_reestablish_response() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+               get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
        }
        {
                let mut node_1_per_peer_lock;
                let mut node_1_peer_state_lock;
-               get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+               get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
        }
 
        // Route the payment and deliver the initial commitment_signed (with a monitor update failure
@@ -1641,7 +1644,7 @@ fn test_monitor_update_fail_claim() {
        // Rebalance a bit so that we can send backwards from 3 to 2.
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
@@ -1760,7 +1763,7 @@ fn test_monitor_update_on_pending_forwards() {
        // Rebalance a bit so that we can send backwards from 3 to 1.
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
        nodes[2].node.fail_htlc_backwards(&payment_hash_1);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
        check_added_monitors!(nodes[2], 1);
@@ -1832,7 +1835,7 @@ fn monitor_update_claim_fail_no_response() {
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        // Forward a payment for B to claim
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -2174,7 +2177,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
 
        let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
        assert_eq!(bs_txn.len(), 1);
@@ -2340,7 +2343,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        //
        // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
        // (c) will not be freed from the holding cell.
-       let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
+       let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
 
        nodes[0].node.send_payment_with_route(&route, payment_hash_1,
                RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
@@ -2514,7 +2517,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
        let mut as_raa = None;
        if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
@@ -2744,8 +2747,8 @@ fn double_temp_error() {
 
        let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // `claim_funds` results in a ChannelMonitorUpdate.
@@ -3035,15 +3038,15 @@ fn test_blocked_chan_preimage_release() {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1).2;
-       create_announced_chan_between_nodes(&nodes, 1, 2).2;
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
 
        // Tee up two payments in opposite directions across nodes[1], one it sent to generate a
        // PaymentSent event and one it forwards.
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
 
        // Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
        nodes[2].node.claim_funds(payment_preimage_1);
@@ -3065,11 +3068,20 @@ fn test_blocked_chan_preimage_release() {
        let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
        check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
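+       // The preimage should already be stored in the upstream (nodes[1] <-> nodes[2])
+       // ChannelMonitor, even though the update_fulfill_htlc for nodes[2] is still held back.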
+       assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       // Finish the CS dance between nodes[0] and nodes[1].
-       do_commitment_signed_dance(&nodes[1], &nodes[0], &as_htlc_fulfill_updates.commitment_signed, false, false);
+       // Finish the CS dance between nodes[0] and nodes[1]. Note that until the events are
+       // handled, the update_fulfill_htlc + CS destined for nodes[2] are held back, even though
+       // the preimage is already on disk for the channel.
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
+       check_added_monitors(&nodes[1], 1);
+       let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
+       assert!(a.is_none());
+
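+       // Deliver the final RAA on its own so we can observe that it alone releases neither a
+       // monitor update nor any messages - those wait on the event handling below.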
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
        check_added_monitors(&nodes[1], 0);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 3);
@@ -3077,8 +3089,8 @@ fn test_blocked_chan_preimage_release() {
        if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
        if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
 
-       // The event processing should release the last RAA update.
-       check_added_monitors(&nodes[1], 1);
+       // The event processing should release the last RAA updates on both channels.
+       check_added_monitors(&nodes[1], 2);
 
        // When we fetch the next update the message getter will generate the next update for nodes[2],
        // generating a further monitor update.
@@ -3089,3 +3101,128 @@ fn test_blocked_chan_preimage_release() {
        do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
        expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }
+
+fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
+       // When we forward a payment and receive an `update_fulfill_htlc` message from the downstream
+       // channel, we immediately claim the HTLC on the upstream channel, before even doing a
+       // `commitment_signed` dance on the downstream channel. This implies that our
+       // `ChannelMonitorUpdate`s are generated in the right order - first we ensure we'll get our
+       // money, then we write the update that resolves the downstream node claiming their money. This
+       // is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are
+       // generated, but of course this may not be the case. For asynchronous update writes, we have
+       // to ensure monitor updates can block each other, preventing the inversion altogether.
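+       // Concretely: the first update stores the preimage in the upstream (A <-> B) monitor,
+       // ensuring we can claim our money, while the second resolves the downstream (B <-> C)
+       // claim. If the second completed durably but the first were lost to a crash, C would have
+       // its money without B holding a durable record of the preimage needed to claim from A.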
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+       let persister;
+       let new_chain_monitor;
+       let nodes_1_deserialized;
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+       // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+       // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+       // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
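+       // Snapshot the A <-> B ChannelMonitor before B learns the preimage; the reload below uses
+       // this stale copy, as if the in-progress A <-> B monitor update had never hit disk.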
+       let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+       nodes[2].node.claim_funds(payment_preimage);
+       check_added_monitors(&nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 100_000);
+
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+       // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+       // for it since the monitor update is marked in-progress.
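+       // (The held messages are B's update_fulfill_htlc + commitment_signed towards A, released
+       // near the end of the test once the A <-> B monitor update finally completes.)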
+       check_added_monitors(&nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring
+       // that C won't re-send the preimage when the nodes reconnect and B instead has to pull it
+       // back out of the ChannelMonitor.
+       nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+       check_added_monitors(&nodes[1], 1);
+       if complete_bc_commitment_dance {
+               let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+               nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
+               check_added_monitors(&nodes[2], 1);
+               nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+               check_added_monitors(&nodes[2], 1);
+               let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+               // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
+               // preimage for the A <-> B channel, which will prevent it from persisting the
+               // `ChannelMonitorUpdate` for the B <-> C channel here, to avoid "losing" the preimage.
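+               // (This blocked RAA ChannelMonitorUpdate is the one finally released at the end of
+               // the test, after the A <-> B update completes.)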
+               nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
+               check_added_monitors(&nodes[1], 0);
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       }
+
+       // Now reload node B
+       let manager_b = nodes[1].node.encode();
+
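+       // Unlike `mon_ab`, `mon_bc` is encoded from B's current in-memory state, so the reloaded
+       // B <-> C monitor is fully up to date.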
+       let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+       reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+       // If we used the latest ChannelManager to reload from, we should have both channels still
+       // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+       // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+       // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+       // complete after reconnecting to our peers.
+       persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       nodes[1].node.timer_tick_occurred();
+       check_added_monitors(&nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+       // the end, go ahead and complete it via the reconnect; note that the
+       // `pending_responding_commitment_signed_dup_monitor` flag in `reconnect_args` indicates
+       // that we expect *not* to receive the final RAA ChannelMonitorUpdate.
+       if complete_bc_commitment_dance {
+               reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+       } else {
+               let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+               reconnect_args.pending_responding_commitment_signed.1 = true;
+               reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+               reconnect_args.pending_raa = (false, true);
+               reconnect_nodes(reconnect_args);
+       }
+
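+       // Reconnect to A as well. This does not yet deliver the fulfill to A; that still waits on
+       // the A <-> B ChannelMonitorUpdate completing below.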
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+       // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+       // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+       // process.
+       let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+       nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+       // When we fetch B's HTLC update messages here (now that the ChannelMonitorUpdate has
+       // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+       // channel.
+       let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+       check_added_monitors(&nodes[1], 1);
+
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
+
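+       // `expect_payment_forwarded!` takes both the upstream (nodes[0]) and downstream (nodes[2])
+       // peers, letting it verify the upstream channel over which the payment was forwarded.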
+       expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, false);
+
+       // Finally, check that the payment was, ultimately, seen as sent by node A.
+       expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
+#[test]
+fn test_inverted_mon_completion_order() {
+       do_test_inverted_mon_completion_order(true);
+       do_test_inverted_mon_completion_order(false);
+}