Reduce the number of timer ticks a peer is allowed to take
[rust-lightning] / lightning / src / ln / reorg_tests.rs
index bbdb5bfac6bae9270e30bb99434c818020a3d475..6c39efb87b509feb4332e92ad1f47f64b7c78d5b 100644
@@ -14,9 +14,9 @@ use chain::transaction::OutPoint;
 use chain::{Confirm, Watch};
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
 use ln::features::InitFeatures;
-use ln::msgs::{ChannelMessageHandler, ErrorAction, HTLCFailChannelUpdate};
+use ln::msgs::ChannelMessageHandler;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::test_utils;
 use util::ser::{ReadableArgs, Writeable};
 
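For context, a minimal sketch of the `ClosureReason` variants this test now asserts. The variant shapes below are implied by the assertions in this diff; the real enum in `util::events` carries additional variants:

    // Illustrative only -- not the full enum, just the three variants used below.
    #[derive(Clone, Debug, PartialEq)]
    pub enum ClosureReason {
        /// The peer force-closed, quoting the error message it sent us.
        CounterpartyForceClosed { peer_msg: String },
        /// A commitment transaction confirmed on-chain, closing the channel.
        CommitmentTxConfirmed,
        /// We closed due to a local error, e.g. an un-confirmed funding tx.
        ProcessingError { err: String },
    }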
@@ -80,6 +80,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+               check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
                let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
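`check_closed_event!` is the new assertion introduced in this hunk. A hedged sketch of what it is assumed to expand to, using only the field names this diff itself relies on:

    // Sketch of check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed):
    let events = nodes[2].node.get_and_clear_pending_events();
    assert_eq!(events.len(), 1);
    match events[0] {
        Event::ChannelClosed { ref reason, .. } =>
            assert_eq!(*reason, ClosureReason::CommitmentTxConfirmed),
        _ => panic!("Unexpected event"),
    }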
@@ -97,7 +98,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
        } else {
                // Broadcast node 2 commitment txn
-               let node_2_commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
+               let mut node_2_commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
                assert_eq!(node_2_commitment_txn.len(), 2); // 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[0].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
                check_spends!(node_2_commitment_txn[0], chan_2.3);
@@ -112,15 +113,14 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_spends!(node_1_commitment_txn[0], chan_2.3);
                check_spends!(node_1_commitment_txn[1], node_2_commitment_txn[0]);
 
-               // Confirm node 2's commitment txn (and node 1's HTLC-Timeout) on node 1
-               header.prev_blockhash = nodes[1].best_block_hash();
-               let block = Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[1].clone()] };
-               connect_block(&nodes[1], &block);
+               // Confirm node 1's HTLC-Timeout on node 1
+               mine_transaction(&nodes[1], &node_1_commitment_txn[1]);
                // ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
-               node_2_commitment_txn
+               vec![node_2_commitment_txn.pop().unwrap()]
        };
        check_added_monitors!(nodes[1], 1);
       check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
        check_added_monitors!(nodes[1], 0);
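The `mine_transaction` call in this hunk replaces the hand-rolled block construction; per the deleted lines, it is equivalent to connecting a single-transaction block built on the node's current tip:

    // What mine_transaction(&nodes[1], &node_1_commitment_txn[1]) boils down to,
    // per the lines it replaces:
    header.prev_blockhash = nodes[1].best_block_hash();
    connect_block(&nodes[1], &Block { header, txdata: vec![node_1_commitment_txn[1].clone()] });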
@@ -163,12 +163,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        if claim {
                expect_payment_sent!(nodes[0], our_payment_preimage);
        } else {
-               let events = nodes[0].node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), 1);
-               if let MessageSendEvent::PaymentFailureNetworkUpdate { update: HTLCFailChannelUpdate::ChannelClosed { ref is_permanent, .. } } = events[0] {
-                       assert!(is_permanent);
-               } else { panic!("Unexpected event!"); }
-               expect_payment_failed!(nodes[0], our_payment_hash, false);
+               expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
        }
 }
 
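The single `expect_payment_failed_with_update!` call folds the two deleted checks into one, additionally pinning the failure to chan_2's short channel id. In terms of the old API it amounts to the following sketch (the `short_channel_id` destructuring is assumed from the removed enum variant's shape):

    // Old-API equivalent of the one-line macro call above:
    let events = nodes[0].node.get_and_clear_pending_msg_events();
    assert_eq!(events.len(), 1);
    if let MessageSendEvent::PaymentFailureNetworkUpdate {
        update: HTLCFailChannelUpdate::ChannelClosed { short_channel_id, ref is_permanent }
    } = events[0] {
        assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
        assert!(*is_permanent);
    } else { panic!("Unexpected event!"); }
    expect_payment_failed!(nodes[0], our_payment_hash, false);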
@@ -218,9 +213,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                        disconnect_all_blocks(&nodes[0]);
                }
                if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
+                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
                } else {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
+                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
                }
                check_added_monitors!(nodes[1], 1);
                {
@@ -237,7 +232,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                // it when we go to deserialize, and then use the ChannelManager.
                let nodes_0_serialized = nodes[0].node.encode();
                let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-               nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+               get_monitor!(nodes[0], chan.2).write(&mut chan_0_monitor_serialized).unwrap();
 
                persister = test_utils::TestPersister::new();
                let keys_manager = &chanmon_cfgs[0].keys_manager;
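`get_monitor!` tidies up the removed lock-and-iterate line. A hedged one-statement equivalent, assuming the macro selects the monitor whose funding outpoint belongs to chan.2 rather than taking whatever the map yields first (`OutPoint::to_channel_id` is the existing helper in `chain::transaction`):

    // Assumed behaviour of get_monitor!(nodes[0], chan.2) -- illustrative only:
    nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter()
        .find(|(outpoint, _)| outpoint.to_channel_id() == chan.2)
        .map(|(_, monitor)| monitor.write(&mut chan_0_monitor_serialized).unwrap())
        .expect("no monitor for channel");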
@@ -286,9 +281,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                        disconnect_all_blocks(&nodes[0]);
                }
                if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
+                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
                } else {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
+                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
                }
                check_added_monitors!(nodes[1], 1);
                {
@@ -302,6 +297,13 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
        *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
        nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
        check_added_monitors!(nodes[0], 1);
+       let expected_err = if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
+               "Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs."
+       } else {
+               "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs."
+       };
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Channel closed because of an exception: ".to_owned() + expected_err });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
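The two reasons asserted above differ per side by design (assumed flow, consistent with the peer_msg strings this diff checks): nodes[0] detects the un-confirmation locally and records a ProcessingError, then sends the peer an error message; nodes[1] records what it received verbatim as CounterpartyForceClosed:

    // nodes[0]: ClosureReason::ProcessingError { err: expected_err.to_owned() }
    // nodes[1]: ClosureReason::CounterpartyForceClosed {
    //     peer_msg: format!("Channel closed because of an exception: {}", expected_err) }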
@@ -373,6 +375,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node A commitment transaction
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[0], 1);
        // Verify node A broadcast tx claiming both HTLCs
        {
@@ -390,6 +393,7 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node B
        connect_blocks(&nodes[1], 135);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        check_added_monitors!(nodes[1], 1);
        // Verify node B broadcast 2 HTLC-timeout txn
        let partial_claim_tx = {
@@ -464,9 +468,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
        assert!(nodes[0].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop transactions broadcasted in response to the first commitment transaction (we have good
        // test coverage of these things already elsewhere).