X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Freorg_tests.rs;h=46400641bc178ba21d6e41366f62fe0cbaaef4f9;hb=4fc05af870b9598adccb39ef96a79cd1635baf84;hp=bfc0f56698103b7cb7d54f9b19f74dfe0355f27b;hpb=e1c1ac7576bb74678d65b34b19cd091fc0bb371e;p=rust-lightning

diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index bfc0f566..46400641 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -1,14 +1,30 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
 //! Further functional tests which test blockchain reorganizations.

-use ln::channelmonitor::ANTI_REORG_DELAY;
+use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
+use chain::Watch;
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
 use ln::features::InitFeatures;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, HTLCFailChannelUpdate};
+use util::config::UserConfig;
+use util::enforcing_trait_impls::EnforcingSigner;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::test_utils;
+use util::ser::{ReadableArgs, Writeable};

-use bitcoin::util::hash::BitcoinHash;
 use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::hash_types::BlockHash;

-use std::default::Default;
+use std::collections::HashMap;
+use std::mem;

 use ln::functional_test_utils::*;

@@ -33,18 +49,22 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
- let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+ // Make sure all nodes are at the same starting height
+ connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+ connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+ connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);

 let (our_payment_preimage, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

 // Provide preimage to node 2 by claiming payment
- nodes[2].node.claim_funds(our_payment_preimage, 1000000);
+ nodes[2].node.claim_funds(our_payment_preimage, &None, 1000000);
 check_added_monitors!(nodes[2], 1);
 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());

- let mut headers = Vec::new();
- let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 let claim_txn = if local_commitment {
 // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
 let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
@@ -54,7 +74,7 @@ fn 
do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0]);

 // Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
- nodes[2].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+ connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
 check_added_monitors!(nodes[2], 1);
 check_closed_broadcast!(nodes[2], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
 let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -64,8 +84,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 check_spends!(node_2_commitment_txn[2], node_2_commitment_txn[1]);
 check_spends!(node_2_commitment_txn[0], node_1_commitment_txn[0]);

+ // Make sure node 1's height is the same as the !local_commitment case
+ connect_blocks(&nodes[1], 1);
 // Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
- nodes[1].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+ header.prev_blockhash = nodes[1].best_block_hash();
+ connect_block(&nodes[1], &Block { header, txdata: node_1_commitment_txn.clone() });

 // ...but return node 1's commitment tx in case claim is set and we're preparing to reorg
 vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
@@ -78,7 +101,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 check_spends!(node_2_commitment_txn[1], node_2_commitment_txn[0]);

 // Give node 1 node 2's commitment transaction and get its response (timing the HTLC out)
- nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+ mine_transaction(&nodes[1], &node_2_commitment_txn[0]);
 let node_1_commitment_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 assert_eq!(node_1_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Timeout, ChannelManager: 1 local commitment tx, 1 Offered HTLC-Timeout
 assert_eq!(node_1_commitment_txn[1].output.len(), 2); // to-local and Offered HTLC (to-remote is dust)
@@ -87,39 +110,37 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 check_spends!(node_1_commitment_txn[0], node_2_commitment_txn[0]);

 // Confirm node 2's commitment txn (and node 1's HTLC-Timeout) on node 1
- nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+ header.prev_blockhash = nodes[1].best_block_hash();
+ connect_block(&nodes[1], &Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[0].clone()] });
 // ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
 node_2_commitment_txn
 };
 check_added_monitors!(nodes[1], 1);
 check_closed_broadcast!(nodes[1], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
- headers.push(header.clone());
- // At CHAN_CONFIRM_DEPTH + 1 we have a confirmation count of 1, so CHAN_CONFIRM_DEPTH +
- // ANTI_REORG_DELAY - 1 will give us a confirmation count of ANTI_REORG_DELAY - 1.
- for i in CHAN_CONFIRM_DEPTH + 2..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1 { - header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[1].block_notifier.block_connected_checked(&header, i, &vec![], &[0; 0]); - headers.push(header.clone()); - } + // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1. + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); check_added_monitors!(nodes[1], 0); assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0); if claim { - // Now reorg back to CHAN_CONFIRM_DEPTH and confirm node 2's broadcasted transactions: - for (height, header) in (CHAN_CONFIRM_DEPTH + 1..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1).zip(headers.iter()).rev() { - nodes[1].block_notifier.block_disconnected(&header, height); - } + disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); - header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[1].block_notifier.block_connected(&Block { header, txdata: claim_txn }, CHAN_CONFIRM_DEPTH + 1); + let block = Block { + header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }, + txdata: claim_txn, + }; + connect_block(&nodes[1], &block); - // ChannelManager only polls ManyChannelMonitor::get_and_clear_pending_htlcs_updated when we + // ChannelManager only polls chain::Watch::release_pending_monitor_events when we // probe it for events, so we probe non-message events here (which should still end up empty): assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0); } else { // Confirm the timeout tx and check that we fail the HTLC backwards - header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[1].block_notifier.block_connected_checked(&header, CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY, &vec![], &[0; 0]); + let block = Block { + header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }, + txdata: vec![], + }; + connect_block(&nodes[1], &block); expect_pending_htlcs_forwardable!(nodes[1]); } @@ -162,3 +183,191 @@ fn test_onchain_htlc_claim_reorg_remote_commitment() { fn test_onchain_htlc_timeout_delay_remote_commitment() { do_test_onchain_htlc_reorg(false, false); } + +fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) { + // After creating a chan between nodes, we disconnect all blocks previously seen to force a + // channel close on nodes[0] side. We also use this to provide very basic testing of logic + // around freeing background events which store monitor updates during block_[dis]connected. 
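+ //
+ // If `reorg_after_reload` is set, the blocks are only disconnected after the (optional)
+ // ChannelManager/ChannelMonitor serialization round-trip driven by `reload_node`; otherwise we
+ // disconnect first, so the round-trip must also survive the pending background monitor update
+ // which that disconnection queues up.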
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+
+ let channel_state = nodes[0].node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.by_id.len(), 1);
+ assert_eq!(channel_state.short_to_id.len(), 1);
+ mem::drop(channel_state);
+
+ if !reorg_after_reload {
+ disconnect_all_blocks(&nodes[0]);
+ check_closed_broadcast!(nodes[0], false);
+ {
+ let channel_state = nodes[0].node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.by_id.len(), 0);
+ assert_eq!(channel_state.short_to_id.len(), 0);
+ }
+ }
+
+ if reload_node {
+ // Since we currently have a background event pending, it's good to test that we survive a
+ // serialization roundtrip. Further, this tests the somewhat awkward edge-case of dropping
+ // the Channel object from the ChannelManager, but still having a monitor event pending for
+ // it when we go to deserialize, and then use the ChannelManager.
+ let nodes_0_serialized = nodes[0].node.encode();
+ let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+ nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
+ persister = test_utils::TestPersister::new();
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+ nodes[0].chain_monitor = &new_chain_monitor;
+ let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+ let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut chan_0_monitor_read, keys_manager).unwrap();
+ assert!(chan_0_monitor_read.is_empty());
+
+ let mut nodes_0_read = &nodes_0_serialized[..];
+ let config = UserConfig::default();
+ nodes_0_deserialized = {
+ let mut channel_monitors = HashMap::new();
+ channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+ <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(
+ &mut nodes_0_read, ChannelManagerReadArgs {
+ default_config: config,
+ keys_manager,
+ fee_estimator: node_cfgs[0].fee_estimator,
+ chain_monitor: nodes[0].chain_monitor,
+ tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+ logger: nodes[0].logger,
+ channel_monitors,
+ }).unwrap().1
+ };
+ nodes[0].node = &nodes_0_deserialized;
+ assert!(nodes_0_read.is_empty());
+
+ nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ }
+
+ if reorg_after_reload {
+ disconnect_all_blocks(&nodes[0]);
+ check_closed_broadcast!(nodes[0], false);
+ {
+ let channel_state = nodes[0].node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.by_id.len(), 0);
+ assert_eq!(channel_state.short_to_id.len(), 0);
+ }
+ }
+ // With expect_channel_force_closed set, the TestChainMonitor will enforce that the next update
+ // is a ChannelForceClosed on the right channel with should_broadcast set.
+ *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan_id, true));
+ nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
+ check_added_monitors!(nodes[0], 1);
+}
+
+#[test]
+fn test_unconf_chan() {
+ do_test_unconf_chan(true, true);
+ do_test_unconf_chan(false, true);
+ do_test_unconf_chan(true, false);
+ do_test_unconf_chan(false, false);
+}
+
+#[test]
+fn test_set_outpoints_partial_claiming() {
+ // - the remote party's partial claim tx confirms: we regenerate a bumped claim tx without that HTLC
+ // - the remote claiming tx is disconnected: we regenerate a bumped claim tx for both HTLCs again
+ // - the commitment tx itself is disconnected: no claim tx is generated anymore
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
+ let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
+ let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
+
+ // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLCs
+ let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
+ assert_eq!(remote_txn.len(), 3);
+ assert_eq!(remote_txn[0].output.len(), 4);
+ assert_eq!(remote_txn[0].input.len(), 1);
+ assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
+ check_spends!(remote_txn[1], remote_txn[0]);
+ check_spends!(remote_txn[2], remote_txn[0]);
+
+ // Connect blocks on node A to advance height towards TEST_FINAL_CLTV
+ // Provide node A with both preimages
+ nodes[0].node.claim_funds(payment_preimage_1, &None, 3_000_000);
+ nodes[0].node.claim_funds(payment_preimage_2, &None, 3_000_000);
+ check_added_monitors!(nodes[0], 2);
+ nodes[0].node.get_and_clear_pending_events();
+ nodes[0].node.get_and_clear_pending_msg_events();
+
+ // Confirm the remote commitment transaction on node A
+ mine_transaction(&nodes[0], &remote_txn[0]);
+ check_closed_broadcast!(nodes[0], false);
+ check_added_monitors!(nodes[0], 1);
+ // Verify node A broadcast a tx claiming both HTLCs
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ // ChannelMonitor: claim tx, ChannelManager: local commitment tx + HTLC-Success*2
+ assert_eq!(node_txn.len(), 4);
+ check_spends!(node_txn[0], remote_txn[0]);
+ check_spends!(node_txn[1], chan.3);
+ check_spends!(node_txn[2], node_txn[1]);
+ check_spends!(node_txn[3], node_txn[1]);
+ assert_eq!(node_txn[0].input.len(), 2);
+ node_txn.clear();
+ }
+
+ // Connect blocks on node B
+ connect_blocks(&nodes[1], 135);
+ check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
+ // Verify node B broadcast 2 HTLC-timeout txn
+ let partial_claim_tx = {
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3);
+ check_spends!(node_txn[1], node_txn[0]);
+ check_spends!(node_txn[2], node_txn[0]);
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[2].input.len(), 1);
+ node_txn[1].clone()
+ };
+
+ // Confirm node B's partial claim on node A, which should regenerate a claiming tx with the claimed HTLC dropped
+ mine_transaction(&nodes[0], &partial_claim_tx);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], remote_txn[0]);
+ assert_eq!(node_txn[0].input.len(), 1); // dropped HTLC
+ node_txn.clear();
+ }
+ nodes[0].node.get_and_clear_pending_msg_events();
+
+ // Disconnect the last block on node A, which should regenerate a claiming tx re-including the reorged-out HTLC
+ disconnect_blocks(&nodes[0], 1);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], remote_txn[0]);
+ assert_eq!(node_txn[0].input.len(), 2); // resurrected HTLC
+ node_txn.clear();
+ }
+
+ // Disconnect one more block and then reconnect multiple blocks; no transactions should be generated
+ disconnect_blocks(&nodes[0], 1);
+ connect_blocks(&nodes[0], 15);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 0);
+ node_txn.clear();
+ }
+}
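
For reference, the block-connection pattern the rewritten tests lean on can be sketched as the fragment below. It is an illustrative sketch only, not part of the patch: it assumes the functional_test_utils helpers used throughout this diff (mine_transaction, connect_blocks, disconnect_blocks), the ANTI_REORG_DELAY constant, a `nodes` array built by create_network, and a placeholder `commitment_tx` transaction.

    // Confirm a transaction at the current tip, giving it a confirmation count of 1...
    mine_transaction(&nodes[1], &commitment_tx);
    // ...then bury it under ANTI_REORG_DELAY - 1 further empty blocks so the ChannelMonitor
    // sees ANTI_REORG_DELAY confirmations and treats the on-chain resolution as final.
    connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
    // A reorg is simulated simply by disconnecting those blocks again.
    disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);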