From e985334fd2a297a6b3a3e4637a4277147c4b9d7d Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 17 Mar 2021 13:11:48 -0400
Subject: [PATCH] Fix block connection ordering in a number of functional tests

Many functional tests rely on being able to call block_connected
arbitrarily, jumping back in time to confirm a transaction at a
specific height. Instead, this takes us one step towards having a
well-formed blockchain in the functional tests.

We also take this opportunity to reduce the number of blocks connected
during tests, requiring a number of constant tweaks in various
functional tests.

Co-authored-by: Valentine Wallace
Co-authored-by: Matt Corallo
---
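Reviewer note (illustrative only, not part of the commit): a minimal sketch of how the reworked
helpers are intended to compose. It assumes the usual functional_test_utils harness
(create_chanmon_cfgs, create_node_cfgs, create_node_chanmgrs, create_network) and a hypothetical
funding transaction `tx` produced earlier in a test.

    // Standard harness setup, as used throughout functional_tests.rs.
    let chanmon_cfgs = create_chanmon_cfgs(2);
    let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
    let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
    let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

    // Confirm `tx` (assumed to exist) in the block immediately after node 0's current tip...
    let conf_height = nodes[0].best_block_info().1 + 1;
    confirm_transaction_at(&nodes[0], &tx, conf_height);
    // ...then extend that same chain until the transaction has CHAN_CONFIRM_DEPTH confirmations,
    // always building on the node's recorded best block rather than jumping to arbitrary heights.
    connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1, nodes[0].best_block_info().1, false, Default::default());

    // With a well-formed chain in place, a reorg back to (but not including) genesis is one call:
    disconnect_all_blocks(&nodes[0]);

confirm_transaction(node, tx) is shorthand for the confirm-then-bury pair above, while
mine_transaction(node, tx) only mines the single confirming block.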
 lightning-persister/src/lib.rs            |   2 +-
 lightning/src/ln/functional_test_utils.rs |  85 +++++++---
 lightning/src/ln/functional_tests.rs      | 162 ++++++++++++----
 lightning/src/ln/reorg_tests.rs           |  24 +---
 4 files changed, 156 insertions(+), 117 deletions(-)

diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs
index 264715c01..dcf5e45f2 100644
--- a/lightning-persister/src/lib.rs
+++ b/lightning-persister/src/lib.rs
@@ -248,7 +248,7 @@ mod tests {
 		assert_eq!(node_txn.len(), 1);
 
 		let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-		connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]}, CHAN_CONFIRM_DEPTH);
+		connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]}, CHAN_CONFIRM_DEPTH + 1);
 		check_closed_broadcast!(nodes[1], false);
 		check_added_monitors!(nodes[1], 1);

diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 58d5fd355..3672a1ba4 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -44,29 +44,47 @@ use std::sync::Mutex;
 use std::mem;
 use std::collections::HashMap;
 
-pub const CHAN_CONFIRM_DEPTH: u32 = 100;
+pub const CHAN_CONFIRM_DEPTH: u32 = 10;
+/// Mine the given transaction in the next block and then mine CHAN_CONFIRM_DEPTH - 1 blocks on
+/// top, giving the given transaction CHAN_CONFIRM_DEPTH confirmations.
 pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
-	let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
-	let dummy_tx_count = tx.version as usize;
+	confirm_transaction_at(node, tx, node.best_block_info().1 + 1);
+	connect_blocks(node, CHAN_CONFIRM_DEPTH - 1, node.best_block_info().1, false, Default::default());
+}
+/// Mine a single block containing the given transaction
+pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+	let height = node.best_block_info().1 + 1;
+	confirm_transaction_at(node, tx, height);
+}
+/// Mine the given transaction at the given height, mining blocks as required to build to that
+/// height
+pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
+	let starting_block = node.best_block_info();
 	let mut block = Block {
-		header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
-		txdata: vec![dummy_tx; dummy_tx_count],
+		header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+		txdata: Vec::new(),
 	};
-	block.txdata.push(tx.clone());
-	connect_block(node, &block, 1);
-	for i in 2..CHAN_CONFIRM_DEPTH {
+	let height = starting_block.1 + 1;
+	assert!(height <= conf_height);
+	for i in height..conf_height {
+		connect_block(node, &block, i);
 		block = Block {
 			header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 			txdata: vec![],
 		};
-		connect_block(node, &block, i);
 	}
+
+	for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
+		block.txdata.push(Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() });
+	}
+	block.txdata.push(tx.clone());
+	connect_block(node, &block, conf_height);
 }
 
 pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32, height: u32, parent: bool, prev_blockhash: BlockHash) -> BlockHash {
 	let mut block = Block {
-		header: BlockHeader { version: 0x2000000, prev_blockhash: if parent { prev_blockhash } else { Default::default() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+		header: BlockHeader { version: 0x2000000, prev_blockhash: if parent { prev_blockhash } else { node.best_block_hash() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 		txdata: vec![],
 	};
 	connect_block(node, &block, height + 1);
@@ -91,9 +109,23 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block,
 pub fn disconnect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, header: &BlockHeader, height: u32) {
 	node.chain_monitor.chain_monitor.block_disconnected(header, height);
 	node.node.block_disconnected(header);
-	node.node.test_process_background_events();
 	node.blocks.borrow_mut().pop();
 }
+pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) {
+	assert!(node.blocks.borrow_mut().len() as u32 > count); // Cannot disconnect genesis
+	for _ in 0..count {
+		let (block_header, height) = {
+			let blocks = node.blocks.borrow_mut();
+			(blocks[blocks.len() - 1].0, blocks[blocks.len() - 1].1)
+		};
+		disconnect_block(&node, &block_header, height);
+	}
+}
+
+pub fn disconnect_all_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
+	let count = node.blocks.borrow_mut().len() as u32 - 1;
+	disconnect_blocks(node, count);
+}
 
 pub struct TestChanMonCfg {
 	pub tx_broadcaster: test_utils::TestBroadcaster,
@@ -427,8 +459,9 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
 	tx
 }
 
-pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_recv: &'a Node<'b, 'c, 'c>, node_conf: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
-	confirm_transaction(node_conf, tx);
+pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_recv: &'a Node<'b, 'c, 'c>, node_conf: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
+	confirm_transaction_at(node_conf, tx, conf_height);
+	connect_blocks(node_conf, CHAN_CONFIRM_DEPTH - 1, node_conf.best_block_info().1, false, Default::default());
 	node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id()));
 }
 
@@ -453,8 +486,10 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv
 }
 
 pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
-	create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
-	confirm_transaction(node_a, tx);
+	let conf_height = std::cmp::max(node_a.best_block_info().1 + 1, node_b.best_block_info().1 + 1);
+	create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx, conf_height);
+	confirm_transaction_at(node_a, tx, conf_height);
+	connect_blocks(node_a, CHAN_CONFIRM_DEPTH - 1, node_a.best_block_info().1, false, Default::default());
 	create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 }
 
@@ -854,13 +889,13 @@ macro_rules! expect_payment_failed {
 		assert_eq!(events.len(), 1);
 		match events[0] {
 			Event::PaymentFailed { ref payment_hash, rejected_by_dest, ref error_code, ref error_data } => {
-				assert_eq!(*payment_hash, $expected_payment_hash);
-				assert_eq!(rejected_by_dest, $rejected_by_dest);
-				assert!(error_code.is_some());
-				assert!(error_data.is_some());
+				assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash");
+				assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value");
+				assert!(error_code.is_some(), "expected error_code.is_some() = true");
+				assert!(error_data.is_some(), "expected error_data.is_some() = true");
 				$(
-					assert_eq!(error_code.unwrap(), $expected_error_code);
-					assert_eq!(&error_data.as_ref().unwrap()[..], $expected_error_data);
+					assert_eq!(error_code.unwrap(), $expected_error_code, "unexpected error code");
+					assert_eq!(&error_data.as_ref().unwrap()[..], $expected_error_data, "unexpected error data");
 				)*
 			},
 			_ => panic!("Unexpected event"),
@@ -1031,7 +1066,7 @@ pub fn claim_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route:
 	claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage, expected_amount);
 }
 
-pub const TEST_FINAL_CLTV: u32 = 32;
+pub const TEST_FINAL_CLTV: u32 = 50;
 
 pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
 	let net_graph_msg_handler = &origin_node.net_graph_msg_handler;
@@ -1172,6 +1207,9 @@ pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec
 	let mut chanmgrs = Vec::new();
 	for i in 0..node_count {
 		let mut default_config = UserConfig::default();
+		// Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our
+		// tests so that our script-length checks don't fail (see ACCEPTED_HTLC_SCRIPT_WEIGHT).
+		default_config.channel_options.cltv_expiry_delta = 6*6;
 		default_config.channel_options.announced_channel = true;
 		default_config.peer_channel_config_limits.force_announced_channel_preference = false;
 		default_config.own_channel_config.our_htlc_minimum_msat = 1000; // sanitization being done by the sender, to exerce receiver logic we need to lift of limit
@@ -1207,7 +1245,8 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec
 		panic!("Unexpected event"),
 	};
 
-	connect_block(&nodes[2], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+	let last_block = nodes[2].best_block_info();
+	mine_transaction(&nodes[2], &commitment_tx[0]);
 	check_closed_broadcast!(nodes[2], false);
 	check_added_monitors!(nodes[2], 1);
 	let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
@@ -3010,30 +3009,44 @@ fn test_htlc_on_chain_timeout() {
 	// Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
 	// Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
-	connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
+	connect_blocks(&nodes[1], 200 - last_block.1 - 1, last_block.1, false, Default::default());
+	mine_transaction(&nodes[1], &commitment_tx[0]);
 	let timeout_tx;
 	{
 		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 5); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (local commitment tx + HTLC-timeout), 1 timeout tx
-		assert_eq!(node_txn[1], node_txn[3]);
-		assert_eq!(node_txn[2], node_txn[4]);
+		assert_eq!(node_txn[0], node_txn[3]);
+		assert_eq!(node_txn[1], node_txn[4]);
 
-		check_spends!(node_txn[0], commitment_tx[0]);
-		assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+		check_spends!(node_txn[2], commitment_tx[0]);
+		assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 
-		check_spends!(node_txn[1], chan_2.3);
-		check_spends!(node_txn[2], node_txn[1]);
-		assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71);
-		assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+		check_spends!(node_txn[0], chan_2.3);
+		check_spends!(node_txn[1], node_txn[0]);
+		assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71);
+		assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 
-		timeout_tx = node_txn[0].clone();
+		timeout_tx = node_txn[2].clone();
 		node_txn.clear();
 	}
 
-	connect_block(&nodes[1], &Block { header, txdata: vec![timeout_tx]}, 1);
-	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+	mine_transaction(&nodes[1], &timeout_tx);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], false);
+	{
+		// B will rebroadcast a fee-bumped timeout transaction here.
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(node_txn.len(), 1);
+		check_spends!(node_txn[0], commitment_tx[0]);
+	}
+
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 201, false, Default::default());
+	{
+		// B will rebroadcast its own holder commitment transaction here...just because
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(node_txn.len(), 1);
+		check_spends!(node_txn[0], chan_2.3);
+	}
 
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	check_added_monitors!(nodes[1], 1);
@@ -3049,14 +3062,13 @@ fn test_htlc_on_chain_timeout() {
 		},
 		_ => panic!("Unexpected event"),
 	};
-	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // Well... here we detect our own htlc_timeout_tx so no tx to be generated
-	assert_eq!(node_txn.len(), 0);
 
 	// Broadcast legit commitment tx from B on A's chain
 	let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
 	check_spends!(commitment_tx[0], chan_1.3);
-	connect_block(&nodes[0], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
+	mine_transaction(&nodes[0], &commitment_tx[0]);
+
 	check_closed_broadcast!(nodes[0], false);
 	check_added_monitors!(nodes[0], 1);
 	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 timeout tx
@@ -4054,7 +4066,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
 		let payment_secret = PaymentSecret([0xdb; 32]);
 		// Use the utility function send_payment_along_path to send the payment with MPP data which
 		// indicates there are more HTLCs coming.
-		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, CHAN_CONFIRM_DEPTH).unwrap();
+		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
+		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height).unwrap();
 		check_added_monitors!(nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -4067,12 +4080,12 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
 	};
 
 	let mut block = Block {
-		header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+		header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 		txdata: vec![],
 	};
-	connect_block(&nodes[0], &block, 101);
-	connect_block(&nodes[1], &block, 101);
-	for i in 102..TEST_FINAL_CLTV + 100 + 1 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS {
+	connect_block(&nodes[0], &block, CHAN_CONFIRM_DEPTH + 1);
+	connect_block(&nodes[1], &block, CHAN_CONFIRM_DEPTH + 1);
+	for i in CHAN_CONFIRM_DEPTH + 2 ..TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS {
 		block.header.prev_blockhash = block.block_hash();
 		connect_block(&nodes[0], &block, i);
 		connect_block(&nodes[1], &block, i);
@@ -4089,9 +4102,9 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
 	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
 
-	// 100_000 msat as u64, followed by a height of 123 as u32
+	// 100_000 msat as u64, followed by a height of TEST_FINAL_CLTV + 2 as u32
 	let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec();
-	expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(123));
+	expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(TEST_FINAL_CLTV + 2));
 
 	expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
 }
@@ -4109,6 +4122,12 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+	// Make sure all nodes are at the same starting height
+	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1, nodes[0].best_block_info().1, false, Default::default());
+	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1, nodes[1].best_block_info().1, false, Default::default());
+	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1, nodes[2].best_block_info().1, false, Default::default());
+
 	let logger = test_utils::TestLogger::new();
 
 	// Route a first payment to get the 1 -> 2 channel in awaiting_raa...
@@ -4140,21 +4159,10 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
 		check_added_monitors!(nodes[1], 0);
 	}
 
-	let mut block = Block {
-		header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
-		txdata: vec![],
-	};
-	connect_block(&nodes[1], &block, 101);
-	for i in 102..TEST_FINAL_CLTV + 100 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS {
-		block.header.prev_blockhash = block.block_hash();
-		connect_block(&nodes[1], &block, i);
-	}
-
+	connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS, nodes[1].best_block_info().1, false, Default::default());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
-
-	block.header.prev_blockhash = block.block_hash();
-	connect_block(&nodes[1], &block, TEST_FINAL_CLTV + 100 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
+	connect_blocks(&nodes[1], 1, nodes[1].best_block_info().1, false, Default::default());
 
 	if forwarded_htlc {
 		expect_pending_htlcs_forwardable!(nodes[1]);
@@ -5749,11 +5757,12 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
 	check_added_monitors!(nodes[1], 1);
 
+	let starting_block = nodes[1].best_block_info();
 	let mut block = Block {
-		header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+		header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 		txdata: vec![],
 	};
-	for i in 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + CHAN_CONFIRM_DEPTH + 1 {
+	for i in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
 		connect_block(&nodes[1], &block, i);
 		block.header.prev_blockhash = block.block_hash();
 	}
@@ -5782,9 +5791,10 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
 	// transaction, however it is not in A's latest local commitment, so we can just broadcast that
 	// to "time out" the HTLC.
-	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	let starting_block = nodes[1].best_block_info();
+	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 
-	for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+	for i in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
 		connect_block(&nodes[0], &Block { header, txdata: Vec::new()}, i);
 		header.prev_blockhash = header.block_hash();
 	}
@@ -5826,11 +5836,12 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
 		check_added_monitors!(nodes[0], 1);
 	}
 
+	let starting_block = nodes[1].best_block_info();
 	let mut block = Block {
-		header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+		header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 		txdata: vec![],
 	};
-	for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+	for i in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
 		connect_block(&nodes[0], &block, i);
 		block.header.prev_blockhash = block.block_hash();
 	}
@@ -7577,9 +7588,9 @@ fn test_check_htlc_underpaying() {
 	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
 	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
 
-	// 10_000 msat as u64, followed by a height of 99 as u32
+	// 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
 	let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec();
-	expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(99));
+	expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(CHAN_CONFIRM_DEPTH));
 
 	expect_payment_failed!(nodes[0], payment_hash, true, 0x4000|15, &expected_failure_data[..]);
 	nodes[1].node.get_and_clear_pending_events();
 }
@@ -7659,7 +7670,6 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
 	let logger = test_utils::TestLogger::new();
 
-	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 	let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
 	let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), None, None, &Vec::new(), 3000000, 30, &logger).unwrap();
@@ -7680,12 +7690,13 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	}
 
 	// Connect blocks to change height_timer range to see if we use right soonest_timelock
-	let header_114 = connect_blocks(&nodes[1], 114, 0, false, Default::default());
+	let starting_height = nodes[1].best_block_info().1;
+	let header_114 = connect_blocks(&nodes[1], 14, starting_height, false, Default::default());
 
 	// Actually revoke tx by claiming a HTLC
 	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] }, 115);
+	connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] }, 15 + starting_height);
 	check_added_monitors!(nodes[1], 1);
 
 	// One or more justice tx should have been broadcast, check it
@@ -7704,7 +7715,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	};
 
 	// After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
-	let header = connect_blocks(&nodes[1], 3, 115, true, header.block_hash());
+	let header = connect_blocks(&nodes[1], 15, 15 + starting_height, true, header.block_hash());
 	let mut penalty_2 = penalty_1;
 	let mut feerate_2 = 0;
 	{
@@ -7727,7 +7738,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	assert_ne!(feerate_2, 0);
 
 	// After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
-	connect_blocks(&nodes[1], 3, 118, true, header);
+	connect_blocks(&nodes[1], 1, 30 + starting_height, true, header);
 	let penalty_3;
 	let mut feerate_3 = 0;
 	{
@@ -7776,9 +7787,9 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	// Revoke local commitment tx
 	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
 
-	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 	// B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
-	connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+	connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
 	check_closed_broadcast!(nodes[1], false);
 	check_added_monitors!(nodes[1], 1);
@@ -7801,11 +7812,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	}
 
 	// Broadcast set of revoked txn on A
-	let header_128 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header: header_128, txdata: vec![revoked_local_txn[0].clone()] }, 128);
+	let hash_128 = connect_blocks(&nodes[0], 40, CHAN_CONFIRM_DEPTH, false, Default::default());
+	let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 40 + 1);
+	let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }, CHAN_CONFIRM_DEPTH + 40 + 2);
 	expect_pending_htlcs_forwardable_ignore!(nodes[0]);
-	let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }, 129);
 	let first;
 	let feerate_1;
 	let penalty_txn;
@@ -7856,9 +7868,9 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	// Connect one more block to see if bumped penalty are issued for HTLC txn
 	let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
+	connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, CHAN_CONFIRM_DEPTH + 40 + 3);
 	let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() }, 131);
+	connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() }, CHAN_CONFIRM_DEPTH + 40 + 4);
 	{
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
@@ -7877,9 +7889,9 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	};
 
 	// Few more blocks to confirm penalty txn
-	let header_135 = connect_blocks(&nodes[0], 4, 131, true, header_131.block_hash());
+	let header_135 = connect_blocks(&nodes[0], 4, CHAN_CONFIRM_DEPTH + 40 + 4, true, header_131.block_hash());
 	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
-	let header_144 = connect_blocks(&nodes[0], 9, 135, true, header_135);
+	let header_144 = connect_blocks(&nodes[0], 9, CHAN_CONFIRM_DEPTH + 40 + 8, true, header_135);
 	let node_txn = {
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 1);
@@ -7897,8 +7909,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	};
 	// Broadcast claim txn and confirm blocks to avoid further bumps on this outputs
 	let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn }, 145);
-	connect_blocks(&nodes[0], 20, 145, true, header_145.block_hash());
+	connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn }, CHAN_CONFIRM_DEPTH + 40 + 8 + 10);
+	connect_blocks(&nodes[0], 20, CHAN_CONFIRM_DEPTH + 40 + 8 + 10, true, header_145.block_hash());
 	{
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		// We verify than no new transaction has been broadcast because previously
@@ -8306,7 +8318,7 @@ fn test_concurrent_monitor_claim() {
 		watchtower
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	watchtower_alice.chain_monitor.block_connected(&header, &vec![], 135);
+	watchtower_alice.chain_monitor.block_connected(&header, &vec![], CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
 	// Watchtower Alice should have broadcast a commitment/HTLC-timeout
 	{
@@ -8332,7 +8344,7 @@ fn test_concurrent_monitor_claim() {
 		watchtower
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	watchtower_bob.chain_monitor.block_connected(&header, &vec![], 134);
+	watchtower_bob.chain_monitor.block_connected(&header, &vec![], CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
 	// Route another payment to generate another update with still previous HTLC pending
 	let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -8358,7 +8370,7 @@ fn test_concurrent_monitor_claim() {
 	check_added_monitors!(nodes[0], 1);
 
 	//// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
-	watchtower_bob.chain_monitor.block_connected(&header, &vec![], 135);
+	watchtower_bob.chain_monitor.block_connected(&header, &vec![], CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
 	// Watchtower Bob should have broadcast a commitment/HTLC-timeout
 	let bob_state_y;
@@ -8370,7 +8382,7 @@ fn test_concurrent_monitor_claim() {
 	};
 
 	// We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
-	watchtower_alice.chain_monitor.block_connected(&header, &vec![(0, &bob_state_y)], 136);
+	watchtower_alice.chain_monitor.block_connected(&header, &vec![(0, &bob_state_y)], CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 	{
 		let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		// We broadcast twice the transaction, once due to the HTLC-timeout, once due
@@ -8513,8 +8525,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 		true => alice_txn.clone(),
 		false => get_local_commitment_txn!(nodes[1], chan_ab.2)
 	};
-	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-	connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}, 1);
+	let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+	connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}, CHAN_CONFIRM_DEPTH * 2 + 1);
 	let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 	if broadcast_alice {
 		check_closed_broadcast!(nodes[1], false);
@@ -8592,8 +8604,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	let mut txn_to_broadcast = alice_txn.clone();
 	if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
 	if !go_onchain_before_fulfill {
-		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-		connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}, 1);
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+		connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}, CHAN_CONFIRM_DEPTH * 2 + 1);
 		// If Bob was the one to force-close, he will have already passed these checks earlier.
 	if broadcast_alice {
 		check_closed_broadcast!(nodes[1], false);

diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 7a317f65b..92a313f8c 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -209,17 +209,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
 	assert_eq!(channel_state.short_to_id.len(), 1);
 	mem::drop(channel_state);
 
-	let mut headers = Vec::new();
-	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: genesis_block(Network::Testnet).header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	headers.push(header.clone());
-	for _i in 2..100 {
-		header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-		headers.push(header.clone());
-	}
 	if !reorg_after_reload {
-		while !headers.is_empty() {
-			nodes[0].node.block_disconnected(&headers.pop().unwrap());
-		}
+		disconnect_all_blocks(&nodes[0]);
 		check_closed_broadcast!(nodes[0], false);
 		{
 			let channel_state = nodes[0].node.channel_state.lock().unwrap();
@@ -271,9 +262,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
 	}
 
 	if reorg_after_reload {
-		while !headers.is_empty() {
-			nodes[0].node.block_disconnected(&headers.pop().unwrap());
-		}
+		disconnect_all_blocks(&nodes[0]);
 		check_closed_broadcast!(nodes[0], false);
 		{
 			let channel_state = nodes[0].node.channel_state.lock().unwrap();
@@ -281,7 +270,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
 			assert_eq!(channel_state.short_to_id.len(), 0);
 		}
 	}
-
 	// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
 	// is a ChannelForcClosed on the right channel with should_broadcast set.
 	*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan_id, true));
@@ -331,7 +319,7 @@ fn test_set_outpoints_partial_claiming() {
 	// Connect blocks on node A commitment transaction
 	let header_101 = BlockHeader { version: 0x20000000, prev_blockhash: block_hash_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header: header_101, txdata: vec![remote_txn[0].clone()] }, 101);
+	connect_block(&nodes[0], &Block { header: header_101, txdata: vec![remote_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
 	check_closed_broadcast!(nodes[0], false);
 	check_added_monitors!(nodes[0], 1);
 	// Verify node A broadcast tx claiming both HTLCs
@@ -364,7 +352,7 @@ fn test_set_outpoints_partial_claiming() {
 	// Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
 	let header_102 = BlockHeader { version: 0x20000000, prev_blockhash: header_101.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header: header_102, txdata: vec![partial_claim_tx.clone()] }, 102);
+	connect_block(&nodes[0], &Block { header: header_102, txdata: vec![partial_claim_tx.clone()] }, CHAN_CONFIRM_DEPTH + 2);
 	{
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 1);
@@ -375,7 +363,7 @@ fn test_set_outpoints_partial_claiming() {
 	nodes[0].node.get_and_clear_pending_msg_events();
 
 	// Disconnect last block on node A, should regenerate a claiming tx with HTLC dropped
-	disconnect_block(&nodes[0], &header_102, 102);
+	disconnect_block(&nodes[0], &header_102, CHAN_CONFIRM_DEPTH + 2);
 	{
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 1);
@@ -385,7 +373,7 @@ fn test_set_outpoints_partial_claiming() {
 
 	//// Disconnect one more block and then reconnect multiple no transaction should be generated
-	disconnect_block(&nodes[0], &header_101, 101);
+	disconnect_block(&nodes[0], &header_101, CHAN_CONFIRM_DEPTH + 1);
 	connect_blocks(&nodes[1], 15, 101, false, block_hash_100);
 	{
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-- 
2.39.5
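Appended note (illustrative sketch, not part of the patch): the pattern the reworked
functional tests use to put a previously captured commitment transaction on chain and then bury
it, assuming a `commitment_tx` obtained earlier via get_local_commitment_txn! and the same
`nodes` harness as above.

    // Mine the commitment transaction in the block after node 1's current tip.
    mine_transaction(&nodes[1], &commitment_tx[0]);
    check_added_monitors!(nodes[1], 1);
    check_closed_broadcast!(nodes[1], false);
    // Bury it past ANTI_REORG_DELAY so the resulting HTLC resolution is treated as final,
    // extending the node's recorded best block rather than an arbitrary height.
    connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, nodes[1].best_block_info().1, false, Default::default());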