Move `broadcast_node_announcement` to `PeerManager`
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index a7090e4e7355508fb0a7fe10f1106a3a37e7ee78..e4b916c9345d59c7aecd92ab536541a042534ad3 100644
@@ -16,7 +16,7 @@ use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
 use ln::features::InitFeatures;
 use ln::msgs::ChannelMessageHandler;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use util::test_utils;
 use util::ser::{ReadableArgs, Writeable};
 
@@ -28,6 +28,8 @@ use bitcoin::secp256k1::Secp256k1;
 
 use prelude::*;
 use core::mem;
+use bitcoin::hashes::Hash;
+use bitcoin::TxMerkleNode;
 
 use ln::functional_test_utils::*;
 
@@ -68,7 +70,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        check_added_monitors!(nodes[2], 1);
        get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 
-       let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        let claim_txn = if local_commitment {
                // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
                let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
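
A note on the two new imports and the header construction above (this sketch is not part of the patch): `Default::default()` is replaced with an explicit all-zero merkle root, presumably because the rust-bitcoin hash newtypes dropped their `Default` impl and instead expose `all_zeros()` through the `Hash` trait, which is why `bitcoin::hashes::Hash` and `bitcoin::TxMerkleNode` are now imported. A minimal, self-contained version of the pattern:

    // Sketch only, assuming the rust-bitcoin release used by this patch.
    use bitcoin::blockdata::block::BlockHeader;
    use bitcoin::hashes::Hash;
    use bitcoin::{BlockHash, TxMerkleNode};

    fn dummy_header(prev_blockhash: BlockHash) -> BlockHeader {
        BlockHeader {
            version: 0x2000_0000,
            prev_blockhash,
            // Explicit all-zero merkle root; `TxMerkleNode` no longer implements `Default`.
            merkle_root: TxMerkleNode::all_zeros(),
            time: 42,
            bits: 42,
            nonce: 42,
        }
    }
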
@@ -82,7 +84,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
                check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
-               let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
                check_spends!(node_2_commitment_txn[1], chan_2.3);
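
The `.split_off(0)` appended above changes the binding from a held `MutexGuard` over the broadcaster's transaction queue to an owned `Vec`, draining the queue in the process so that transactions broadcast later in the test are counted from zero. A minimal sketch of that pattern, using a plain `Mutex<Vec<u32>>` as a stand-in for `txn_broadcasted` (not part of the patch):

    use std::sync::Mutex;

    fn main() {
        let txn_broadcasted: Mutex<Vec<u32>> = Mutex::new(vec![1, 2, 3]);
        // `split_off(0)` moves every queued element into an owned Vec and leaves
        // the queue empty; the temporary guard is dropped at the end of the statement.
        let drained = txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(drained, vec![1, 2, 3]);
        assert!(txn_broadcasted.lock().unwrap().is_empty());
    }
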
@@ -128,10 +130,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
 
        if claim {
-               disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+               // Disconnect Node 1's HTLC-Timeout which was connected above
+               disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
                let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                        txdata: claim_txn,
                };
                connect_block(&nodes[1], &block);
@@ -143,11 +146,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        } else {
                // Confirm the timeout tx and check that we fail the HTLC backwards
                let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                        txdata: vec![],
                };
                connect_block(&nodes[1], &block);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        }
 
        check_added_monitors!(nodes[1], 1);
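
The new macro still drives the pending-HTLC forwarding pass, but additionally asserts that an `HTLCHandlingFailed` event was emitted naming the expected destination: here the next-hop channel toward nodes[2] (`chan_2.2`). As a hypothetical illustration of what a consumer of that event might do (the `Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination }` shape is assumed from the API of this era, not taken from the patch):

    use lightning::util::events::{Event, HTLCDestination};

    fn log_htlc_failure(event: &Event) {
        if let Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } = event {
            match failed_next_destination {
                // The case this test expects: failure toward the next hop over a known channel.
                HTLCDestination::NextHopChannel { node_id, channel_id } =>
                    println!("HTLC from {:?} failed toward {:?} via {:?}", prev_channel_id, node_id, channel_id),
                other =>
                    println!("HTLC from {:?} failed: {:?}", prev_channel_id, other),
            }
        }
    }
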
@@ -273,7 +276,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 
        let channel_state = nodes[0].node.channel_state.lock().unwrap();
        assert_eq!(channel_state.by_id.len(), 1);
-       assert_eq!(channel_state.short_to_id.len(), 2);
+       assert_eq!(channel_state.short_to_chan_info.len(), 2);
        mem::drop(channel_state);
 
        if !reorg_after_reload {
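
`short_to_id` is renamed to `short_to_chan_info` here and in the two later assertions; in the renamed map each short channel id resolves to the counterparty node id alongside the channel id, rather than to the channel id alone. A rough illustration of that shape (the alias and helper below are hypothetical, not ChannelManager's actual definitions):

    use std::collections::HashMap;
    use bitcoin::secp256k1::PublicKey;

    // Assumed shape: SCID -> (counterparty node id, channel id).
    type ShortToChanInfo = HashMap<u64, (PublicKey, [u8; 32])>;

    fn channel_id_for_scid(map: &ShortToChanInfo, scid: u64) -> Option<[u8; 32]> {
        map.get(&scid).map(|(_counterparty_node_id, channel_id)| *channel_id)
    }
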
@@ -293,7 +296,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
                        assert_eq!(channel_state.by_id.len(), 0);
-                       assert_eq!(channel_state.short_to_id.len(), 0);
+                       assert_eq!(channel_state.short_to_chan_info.len(), 0);
                }
        }
 
@@ -361,7 +364,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
                        assert_eq!(channel_state.by_id.len(), 0);
-                       assert_eq!(channel_state.short_to_id.len(), 0);
+                       assert_eq!(channel_state.short_to_chan_info.len(), 0);
                }
        }
        // With expect_channel_force_closed set the TestChainMonitor will enforce that the next update