X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=5d8cc59978eb1d8f01379b47be6be41900aa8cd1;hb=c92db69183143a58b3017ec450bdbf15a035bd9f;hp=b182d17fe8285a724e151081af2affb4e76adf93;hpb=146a291f15b359289bf536f5579e8cc7bd00b512;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index b182d17f..5d8cc599 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -17,7 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
@@ -693,7 +693,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 		*feerate_lock += 4;
 	}
 	nodes[0].node.timer_tick_occurred();
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
 	check_added_monitors!(nodes[0], 0);
 
 	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
@@ -746,7 +746,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 			&mut htlcs,
 			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
 		);
-		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
 	};
 
 	let commit_signed_msg = msgs::CommitmentSigned {
@@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 	//check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
 	//Should produce an error.
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
@@ -1415,6 +1415,7 @@ fn test_fee_spike_violation_fails_htlc() {
 		cltv_expiry: htlc_cltv,
 		onion_routing_packet: onion_packet,
 		skimmed_fee_msat: None,
+		blinding_point: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -1493,7 +1494,7 @@ fn test_fee_spike_violation_fails_htlc() {
 			&mut vec![(accepted_htlc_info, ())],
 			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
 		);
-		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
 	};
 
 	let commit_signed_msg = msgs::CommitmentSigned {
@@ -1528,7 +1529,7 @@ fn test_fee_spike_violation_fails_htlc() {
 		},
 		_ => panic!("Unexpected event"),
 	};
-	nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
+	nodes[1].logger.assert_log("lightning::ln::channel",
 		format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
 
 	check_added_monitors!(nodes[1], 2);
@@ -1611,11 +1612,12 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 		cltv_expiry: htlc_cltv,
 		onion_routing_packet: onion_packet,
 		skimmed_fee_msat: None,
+		blinding_point: None,
 	};
 
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+	nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
@@ -1789,11 +1791,12 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 		cltv_expiry: htlc_cltv,
 		onion_routing_packet: onion_packet,
 		skimmed_fee_msat: None,
+		blinding_point: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
 	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
 	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
@@ -2270,9 +2273,15 @@ fn channel_monitor_network_test() {
 	nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 	{
 		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
 		assert_eq!(node_txn.len(), 1);
+		mine_transaction(&nodes[1], &node_txn[0]);
+		if nodes[1].connect_style.borrow().updates_best_block_first() {
+			let _ = nodes[1].tx_broadcaster.txn_broadcast();
+		}
+
 		mine_transaction(&nodes[0], &node_txn[0]);
 		check_added_monitors!(nodes[0], 1);
 		test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
@@ -2281,7 +2290,6 @@ fn channel_monitor_network_test() {
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 
 	// One pending HTLC is discarded by the force-close:
 	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -3510,6 +3518,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
 			cltv_expiry,
 			onion_routing_packet,
 			skimmed_fee_msat: None,
+			blinding_point: None,
 		};
 		nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
 	}
@@ -3552,7 +3561,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
 	// connect_style.
 		return;
 	}
 
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
 	route_payment(&nodes[0], &[&nodes[1]], 10000000);
 	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -3561,11 +3570,12 @@ fn test_htlc_ignore_latest_remote_commitment() {
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 
-	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-	assert_eq!(node_txn.len(), 3);
-	assert_eq!(node_txn[0].txid(), node_txn[1].txid());
+	let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+	assert_eq!(node_txn.len(), 2);
+	check_spends!(node_txn[0], funding_tx);
+	check_spends!(node_txn[1], node_txn[0]);
 
-	let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
+	let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
 	connect_block(&nodes[1], &block);
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
@@ -3622,7 +3632,7 @@ fn test_force_close_fail_back() {
 	check_closed_broadcast!(nodes[2], true);
 	check_added_monitors!(nodes[2], 1);
 	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
-	let tx = {
+	let commitment_tx = {
 		let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
 		// have a use for it unless nodes[2] learns the preimage somehow, the funds will go
@@ -3631,7 +3641,7 @@ fn test_force_close_fail_back() {
 		node_txn.remove(0)
 	};
 
-	mine_transaction(&nodes[1], &tx);
+	mine_transaction(&nodes[1], &commitment_tx);
 
 	// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
 	check_closed_broadcast!(nodes[1], true);
@@ -3643,15 +3653,16 @@ fn test_force_close_fail_back() {
 		get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
 			.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
 	}
-	mine_transaction(&nodes[2], &tx);
-	let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
-	assert_eq!(node_txn.len(), 1);
-	assert_eq!(node_txn[0].input.len(), 1);
-	assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
-	assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
-	assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+	mine_transaction(&nodes[2], &commitment_tx);
+	let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
+	assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+	let htlc_tx = node_txn.pop().unwrap();
+	assert_eq!(htlc_tx.input.len(), 1);
+	assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
+	assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
+	assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
 
-	check_spends!(node_txn[0], tx);
+	check_spends!(htlc_tx, commitment_tx);
 }
 
 #[test]
@@ -5926,7 +5937,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
 	// us to surface its failure to the user.
 	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
 	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
 
 	// Check that the payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -6014,7 +6025,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
 	// to surface its failure to the user. The first payment should succeed.
 	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
 	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
 
 	// Check that the second payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -6288,7 +6299,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
 	updates.update_add_htlcs[0].amount_msat = 0;
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
 	check_closed_broadcast!(nodes[1], true).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
@@ -6481,6 +6492,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 		cltv_expiry: htlc_cltv,
 		onion_routing_packet: onion_packet.clone(),
 		skimmed_fee_msat: None,
+		blinding_point: None,
 	};
 
 	for i in 0..50 {
@@ -8563,10 +8575,11 @@ fn test_concurrent_monitor_claim() {
 	watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
 
 	// Watchtower Alice should have broadcast a commitment/HTLC-timeout
-	let alice_state = {
+	{
 		let mut txn = alice_broadcaster.txn_broadcast();
 		assert_eq!(txn.len(), 2);
-		txn.remove(0)
+		check_spends!(txn[0], chan_1.3);
+		check_spends!(txn[1], txn[0]);
 	};
 
 	// Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
@@ -8635,11 +8648,8 @@ fn test_concurrent_monitor_claim() {
 	check_added_monitors(&nodes[0], 1);
 	{
 		let htlc_txn = alice_broadcaster.txn_broadcast();
-		assert_eq!(htlc_txn.len(), 2);
+		assert_eq!(htlc_txn.len(), 1);
 		check_spends!(htlc_txn[0], bob_state_y);
-		// Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
-		// it. However, she should, because it now has an invalid parent.
-		check_spends!(htlc_txn[1], alice_state);
 	}
 }
 
@@ -8878,7 +8888,12 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 		assert_eq!(bob_txn.len(), 1);
 		check_spends!(bob_txn[0], txn_to_broadcast[0]);
 	} else {
-		assert_eq!(bob_txn.len(), 2);
+		if nodes[1].connect_style.borrow().updates_best_block_first() {
+			assert_eq!(bob_txn.len(), 3);
+			assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
+		} else {
+			assert_eq!(bob_txn.len(), 2);
+		}
 		check_spends!(bob_txn[0], chan_ab.3);
 	}
 }
@@ -8894,15 +8909,16 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	// If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
 	// Bob force-closed and broadcasts the commitment transaction along with a
 	// HTLC-output-claiming transaction.
-	let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+	let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	if broadcast_alice {
 		assert_eq!(bob_txn.len(), 1);
 		check_spends!(bob_txn[0], txn_to_broadcast[0]);
 		assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
 	} else {
-		assert_eq!(bob_txn.len(), 2);
-		check_spends!(bob_txn[1], txn_to_broadcast[0]);
-		assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
+		assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
+		let htlc_tx = bob_txn.pop().unwrap();
+		check_spends!(htlc_tx, txn_to_broadcast[0]);
+		assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
 	}
 }
 
@@ -9054,7 +9070,7 @@ fn test_duplicate_chan_id() {
 	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
 	create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
-	let (_, funding_created) = {
+	let funding_created = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
 		// Once we call `get_funding_created` the channel has a duplicate channel_id as
@@ -9062,7 +9078,7 @@ fn test_duplicate_chan_id() {
 		// try to create another channel. Instead, we drop the channel entirely here (leaving the
 		// channelmanager in a possibly nonsense state instead).
 		match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
-			ChannelPhase::UnfundedOutboundV1(chan) => {
+			ChannelPhase::UnfundedOutboundV1(mut chan) => {
 				let logger = test_utils::TestLogger::new();
 				chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
 			},
@@ -9378,8 +9394,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 		// We should broadcast an HTLC transaction spending our funding transaction first
 		let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert_eq!(spending_txn.len(), 2);
-		assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
-		check_spends!(spending_txn[1], node_txn[0]);
+		let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
+			&spending_txn[1]
+		} else {
+			&spending_txn[0]
+		};
+		check_spends!(htlc_tx, node_txn[0]);
 
 		// We should also generate a SpendableOutputs event with the to_self output (as its
 		// timelock is up).
 		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
 		assert_eq!(descriptor_spend_txn.len(), 1);
@@ -9389,7 +9409,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 		// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
 		// additional block built on top of the current chain.
 		nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
-			&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
+			&nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
 		check_added_monitors!(nodes[1], 1);
@@ -9838,10 +9858,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 		// Outbound dust balance: 6399 sats
 		let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
 		let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-		nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
+		nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
 	} else {
 		// Outbound dust balance: 5200 sats
-		nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
+		nodes[0].logger.assert_log("lightning::ln::channel",
 			format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
 				dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4, max_dust_htlc_exposure_msat), 1);