Merge pull request #2280 from TheBlueMatt/2023-05-event-deadlock
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index b8eb11b527fff1d162bf77242112615ede06c4c4..e59cf47f17600963c9852389fc5723d8189816ef 100644
@@ -12,7 +12,6 @@
 //! There are a bunch of these as their handling is relatively error-prone so they are split out
 //! here. See also the chanmon_fail_consistency fuzz test.
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
@@ -35,7 +34,6 @@ use crate::util::test_utils;
 
 use crate::io;
 use bitcoin::hashes::Hash;
-use bitcoin::TxMerkleNode;
 use crate::prelude::*;
 use crate::sync::{Arc, Mutex};
 
@@ -121,15 +119,7 @@ fn test_monitor_and_persister_update_fail() {
                assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                chain_mon
        };
-       let header = BlockHeader {
-               version: 0x20000000,
-               prev_blockhash: BlockHash::all_zeros(),
-               merkle_root: TxMerkleNode::all_zeros(),
-               time: 42,
-               bits: 42,
-               nonce: 42
-       };
-       chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
+       chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
 
        // Set the persister's return value to be a InProgress.
        persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
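
For context: the hunk above drops the hand-rolled BlockHeader in favour of the create_dummy_block test helper. A rough sketch of what such a helper plausibly looks like, reusing the placeholder values the removed lines carried (the real helper lives in the shared test utilities and may, for example, compute the merkle root from txdata):

use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::hash_types::{BlockHash, TxMerkleNode};
use bitcoin::hashes::Hash;

// Hypothetical sketch only: builds a block on top of `prev_blockhash` with the
// same dummy version/bits/nonce values the removed inline construction used.
fn create_dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
	Block {
		header: BlockHeader {
			version: 0x20000000,
			prev_blockhash,
			merkle_root: TxMerkleNode::all_zeros(),
			time,
			bits: 42,
			nonce: 42,
		},
		txdata,
	}
}
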
@@ -146,7 +136,7 @@ fn test_monitor_and_persister_update_fail() {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-               if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        // Check that even though the persister is returning a InProgress,
                        // because the update is bogus, ultimately the error that's returned
                        // should be a PermanentFailure.
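
The Ok(update) -> Ok(Some(update)) change above reflects commitment_signed now returning an optional monitor update rather than always producing one. A hedged sketch of the resulting calling pattern, using stand-in types rather than the real lightning ones:

// Stand-ins for illustration; the real code uses ChannelMonitorUpdate and ChannelError.
struct MonitorUpdate;
struct ChannelError;

// Callers now have to distinguish "update to persist" from "nothing to persist"
// from "error", instead of treating every Ok as carrying an update.
fn handle_commitment_signed_result(res: Result<Option<MonitorUpdate>, ChannelError>) {
	match res {
		Ok(Some(_update)) => { /* hand the update to the chain monitor */ }
		Ok(None) => { /* no monitor update was generated this round */ }
		Err(_e) => { /* fail or close the channel as appropriate */ }
	}
}
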
@@ -208,7 +198,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        let events_3 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_3.len(), 1);
        match events_3[0] {
-               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
                        assert_eq!(payment_hash_1, *payment_hash);
                        assert_eq!(amount_msat, 1_000_000);
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
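
This and the later Event::PaymentClaimable hunks replace the explicit via_user_channel_id: _ binding with .., Rust's rest pattern. A tiny self-contained illustration (the struct here is a stand-in, not the real event):

// Stand-in with only a few fields; the real Event::PaymentClaimable has more.
struct PaymentClaimable { amount_msat: u64, via_user_channel_id: Option<u128> }

fn check(ev: &PaymentClaimable) {
	match ev {
		// `..` ignores every field not named, so this arm keeps compiling if the
		// event grows new fields; `via_user_channel_id: _` only ignored that one.
		PaymentClaimable { amount_msat, .. } => assert_eq!(*amount_msat, 1_000_000),
	}
}
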
@@ -581,7 +571,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
        match events_5[0] {
-               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
                        assert_eq!(payment_hash_2, *payment_hash);
                        assert_eq!(amount_msat, 1_000_000);
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
@@ -699,7 +689,7 @@ fn test_monitor_update_fail_cs() {
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
-               Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
+               Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
                        assert_eq!(payment_hash, our_payment_hash);
                        assert_eq!(amount_msat, 1_000_000);
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
@@ -1692,7 +1682,7 @@ fn test_monitor_update_fail_claim() {
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        match events[0] {
-               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
                        assert_eq!(payment_hash_2, *payment_hash);
                        assert_eq!(1_000_000, amount_msat);
                        assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
@@ -1709,7 +1699,7 @@ fn test_monitor_update_fail_claim() {
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
                        assert_eq!(payment_hash_3, *payment_hash);
                        assert_eq!(1_000_000, amount_msat);
                        assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
@@ -2000,12 +1990,12 @@ fn test_path_paused_mpp() {
        // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
        let path = route.paths[0].clone();
        route.paths.push(path);
-       route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
-       route.paths[0][0].short_channel_id = chan_1_id;
-       route.paths[0][1].short_channel_id = chan_3_id;
-       route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
-       route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id;
-       route.paths[1][1].short_channel_id = chan_4_id;
+       route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
+       route.paths[0].hops[0].short_channel_id = chan_1_id;
+       route.paths[0].hops[1].short_channel_id = chan_3_id;
+       route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
+       route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
+       route.paths[1].hops[1].short_channel_id = chan_4_id;
 
        // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
        // (for the path 0 -> 2 -> 3) fails.
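
The final hunk switches from indexing a path as a bare Vec (route.paths[0][0]) to going through a hops field (route.paths[0].hops[0]), i.e. a path is now a struct wrapping its hops. A rough sketch of the shape this implies, with simplified stand-in types:

use bitcoin::secp256k1::PublicKey;

// Simplified stand-ins implied by the hunk above; the real Path and RouteHop
// structs carry more fields (fees, CLTV deltas, features, and so on).
struct RouteHop { pubkey: PublicKey, short_channel_id: u64 }
struct Path { hops: Vec<RouteHop> }
struct Route { paths: Vec<Path> }

// Re-point the first hop of the first path, mirroring the test's access pattern.
fn retarget_first_hop(route: &mut Route, pubkey: PublicKey, scid: u64) {
	route.paths[0].hops[0].pubkey = pubkey;
	route.paths[0].hops[0].short_channel_id = scid;
}
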