X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=2d52d56aaed46745ce30c5e0de5284b3b1a8efbd;hb=b08387055403b287b448cc5867f8aaa650fae1e7;hp=d73cafdaef46703bcb1a545b668e14df1c42bae9;hpb=224d470d389704f9481df537e6e6d91f4909d5fd;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index d73cafda..2d52d56a 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -42,7 +42,7 @@ use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey,SecretKey};
 
 use regex;
 
@@ -1280,23 +1280,41 @@ fn test_duplicate_htlc_different_direction_onchain() {
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
-	// Check we only broadcast 1 timeout tx
 	let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	assert_eq!(claim_txn.len(), 8);
-	assert_eq!(claim_txn[1], claim_txn[4]);
-	assert_eq!(claim_txn[2], claim_txn[5]);
-	check_spends!(claim_txn[1], chan_1.3);
-	check_spends!(claim_txn[2], claim_txn[1]);
-	check_spends!(claim_txn[7], claim_txn[1]);
+
+	check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
+
+	check_spends!(claim_txn[1], chan_1.3); // Alternative commitment tx
+	check_spends!(claim_txn[2], claim_txn[1]); // HTLC spend in alternative commitment tx
+
+	let bump_tx = if claim_txn[1] == claim_txn[4] {
+		assert_eq!(claim_txn[1], claim_txn[4]);
+		assert_eq!(claim_txn[2], claim_txn[5]);
+
+		check_spends!(claim_txn[7], claim_txn[1]); // HTLC timeout on alternative commitment tx
+
+		check_spends!(claim_txn[3], remote_txn[0]); // HTLC timeout on broadcasted commitment tx
+		&claim_txn[3]
+	} else {
+		assert_eq!(claim_txn[1], claim_txn[3]);
+		assert_eq!(claim_txn[2], claim_txn[4]);
+
+		check_spends!(claim_txn[5], claim_txn[1]); // HTLC timeout on alternative commitment tx
+
+		check_spends!(claim_txn[7], remote_txn[0]); // HTLC timeout on broadcasted commitment tx
+
+		&claim_txn[7]
+	};
 
 	assert_eq!(claim_txn[0].input.len(), 1);
-	assert_eq!(claim_txn[3].input.len(), 1);
-	assert_eq!(claim_txn[0].input[0].previous_output, claim_txn[3].input[0].previous_output);
+	assert_eq!(bump_tx.input.len(), 1);
+	assert_eq!(claim_txn[0].input[0].previous_output, bump_tx.input[0].previous_output);
 
 	assert_eq!(claim_txn[0].input.len(), 1);
 	assert_eq!(claim_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
-	check_spends!(claim_txn[0], remote_txn[0]);
 	assert_eq!(remote_txn[0].output[claim_txn[0].input[0].previous_output.vout as usize].value, 800);
+
 	assert_eq!(claim_txn[6].input.len(), 1);
 	assert_eq!(claim_txn[6].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
 	check_spends!(claim_txn[6], remote_txn[0]);
@@ -2178,9 +2196,9 @@ fn channel_monitor_network_test() {
 	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
 	// Simple case with no pending HTLCs:
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+	nodes[1].node.force_close_channel(&chan_1.2).unwrap();
 	check_added_monitors!(nodes[1], 1);
-	check_closed_broadcast!(nodes[1], false);
+	check_closed_broadcast!(nodes[1], true);
 	{
 		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
 		assert_eq!(node_txn.len(), 1);
@@ -2192,15 +2210,15 @@ fn channel_monitor_network_test() {
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
-	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 
 	// One pending HTLC is discarded by the force-close:
 	let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
 
 	// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
 	// broadcasted until we reach the timelock time).
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
-	check_closed_broadcast!(nodes[1], false);
+	nodes[1].node.force_close_channel(&chan_2.2).unwrap();
+	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	{
 		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
@@ -2213,7 +2231,7 @@ fn channel_monitor_network_test() {
 	check_closed_broadcast!(nodes[2], true);
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 	assert_eq!(nodes[2].node.list_channels().len(), 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
 	macro_rules! claim_funds {
@@ -2238,9 +2256,9 @@ fn channel_monitor_network_test() {
 
 	// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
 	// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+	nodes[2].node.force_close_channel(&chan_3.2).unwrap();
 	check_added_monitors!(nodes[2], 1);
-	check_closed_broadcast!(nodes[2], false);
+	check_closed_broadcast!(nodes[2], true);
 	let node2_commitment_txid;
 	{
 		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
@@ -2257,7 +2275,7 @@ fn channel_monitor_network_test() {
 	check_closed_broadcast!(nodes[3], true);
 	assert_eq!(nodes[2].node.list_channels().len(), 0);
 	assert_eq!(nodes[3].node.list_channels().len(), 1);
-	check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
 	check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
 	// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
@@ -2351,7 +2369,8 @@ fn test_justice_tx() {
 	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
-	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	*nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
 
 	// Create some new channels:
 	let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
@@ -2583,11 +2602,7 @@ fn claim_htlc_outputs_single_tx() {
 		expect_payment_failed!(nodes[1], payment_hash_2, true);
 
 		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 9);
-		// ChannelMonitor: justice tx revoked offered htlc, justice tx revoked received htlc, justice tx revoked to_local (3)
-		// ChannelManager: local commmitment + local HTLC-timeout (2)
-		// ChannelMonitor: bumped justice tx, after one increase, bumps on HTLC aren't generated not being substantial anymore, bump on revoked to_local isn't generated due to more room for expiration (2)
-		// ChannelMonitor: local commitment + local HTLC-timeout (2)
+		assert!(node_txn.len() == 9 || node_txn.len() == 10);
 
 		// Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration
 		assert_eq!(node_txn[0].input.len(), 1);
@@ -3489,6 +3504,47 @@ fn test_dup_events_on_peer_disconnect() {
 	expect_payment_path_successful!(nodes[0]);
 }
 
+#[test]
+fn test_peer_disconnected_before_funding_broadcasted() {
+	// Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
+	// before the funding transaction has been broadcasted.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
+	// broadcasted, even though it's created by `nodes[0]`.
+	let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+	let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], 1_000_000, 42);
+	assert_eq!(temporary_channel_id, expected_temporary_channel_id);
+
+	assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).is_ok());
+
+	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+	assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
+
+	// Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
+	// never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
+	// broadcasted.
+	{
+		assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
+	}
+
+	// Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
+	// disconnected before the funding transaction was broadcasted.
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+}
+
 #[test]
 fn test_simple_peer_disconnect() {
 	// Test that we can reconnect when there are no lost messages
@@ -5242,21 +5298,30 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 	let htlc_timeout_tx;
 	{ // Extract one of the two HTLC-Timeout transaction
 		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		// ChannelMonitor: timeout tx * 3, ChannelManager: local commitment tx
-		assert_eq!(node_txn.len(), 4);
+		// ChannelMonitor: timeout tx * 2-or-3, ChannelManager: local commitment tx
+		assert!(node_txn.len() == 4 || node_txn.len() == 3);
 		check_spends!(node_txn[0], chan_2.3);
 
 		check_spends!(node_txn[1], commitment_txn[0]);
 		assert_eq!(node_txn[1].input.len(), 1);
-		check_spends!(node_txn[2], commitment_txn[0]);
-		assert_eq!(node_txn[2].input.len(), 1);
-		assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
-		check_spends!(node_txn[3], commitment_txn[0]);
-		assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
+
+		if node_txn.len() > 3 {
+			check_spends!(node_txn[2], commitment_txn[0]);
+			assert_eq!(node_txn[2].input.len(), 1);
+			assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
+
+			check_spends!(node_txn[3], commitment_txn[0]);
+			assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
+		} else {
+			check_spends!(node_txn[2], commitment_txn[0]);
+			assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
+		}
 
 		assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 		assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-		assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+		if node_txn.len() > 3 {
+			assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+		}
 
 		htlc_timeout_tx = node_txn[1].clone();
 	}
@@ -5746,8 +5811,8 @@ fn test_key_derivation_params() {
 	check_spends!(local_txn_1[0], chan_1.3);
 
 	// We check funding pubkey are unique
-	let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][36..69]));
-	let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][36..69]));
+	let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
+	let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
 	if from_0_funding_key_0 == from_1_funding_key_0
 		|| from_0_funding_key_0 == from_1_funding_key_1
 		|| from_0_funding_key_1 == from_1_funding_key_0
@@ -7345,7 +7410,7 @@ fn test_data_loss_protect() {
 	logger = test_utils::TestLogger::with_id(format!("node {}", 0));
 	let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
 	chain_source = test_utils::TestChainSource::new(Network::Testnet);
-	tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+	tx_broadcaster = test_utils::TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new())) };
 	fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 	persister = test_utils::TestPersister::new();
 	monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
@@ -7402,22 +7467,48 @@ fn test_data_loss_protect() {
 	}
 
 	// Check we close channel detecting A is fallen-behind
+	// Check that we sent the warning message when we detected that A has fallen behind,
+	// and give the possibility for A to recover from the warning.
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
-	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
-	check_added_monitors!(nodes[1], 1);
+	let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+	assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
 
 	// Check A is able to claim to_remote output
-	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-	assert_eq!(node_txn.len(), 1);
-	check_spends!(node_txn[0], chan.3);
-	assert_eq!(node_txn[0].output.len(), 2);
-	mine_transaction(&nodes[0], &node_txn[0]);
-	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
-	let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
-	assert_eq!(spend_txn.len(), 1);
-	check_spends!(spend_txn[0], node_txn[0]);
+	let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+	// Node B should not broadcast the transaction to force close the channel!
+	assert!(node_txn.is_empty());
+	// B should now detect that there is something wrong and should force close the channel.
+	let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
+
+	// After the warning message sent by B, we should not be able to
+	// use the channel, or reconnect with success to the channel.
+	assert!(nodes[0].node.list_usable_channels().is_empty());
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+	let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+	let mut err_msgs_0 = Vec::with_capacity(1);
+	for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+		if let MessageSendEvent::HandleError { ref action, .. } = msg {
+			match action {
+				&ErrorAction::SendErrorMessage { ref msg } => {
+					assert_eq!(msg.data, "Failed to find corresponding channel");
+					err_msgs_0.push(msg.clone());
+				},
+				_ => panic!("Unexpected event!"),
+			}
+		} else {
+			panic!("Unexpected event!");
+		}
+	}
+	assert_eq!(err_msgs_0.len(), 1);
+	nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+	assert!(nodes[1].node.list_usable_channels().is_empty());
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
+	check_closed_broadcast!(nodes[1], false);
 }
 
 #[test]
@@ -7615,7 +7706,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	assert_eq!(node_txn[0].output.len(), 1);
 	check_spends!(node_txn[0], revoked_txn[0]);
 	let fee_1 = penalty_sum - node_txn[0].output[0].value;
-	feerate_1 = fee_1 * 1000 / node_txn[0].get_weight() as u64;
+	feerate_1 = fee_1 * 1000 / node_txn[0].weight() as u64;
 	penalty_1 = node_txn[0].txid();
 	node_txn.clear();
 };
@@ -7635,7 +7726,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	// Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast
 	assert_ne!(penalty_2, penalty_1);
 	let fee_2 = penalty_sum - node_txn[0].output[0].value;
-	feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
+	feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
 	// Verify 25% bump heuristic
 	assert!(feerate_2 * 100 >= feerate_1 * 125);
 	node_txn.clear();
@@ -7658,7 +7749,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	// Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast
 	assert_ne!(penalty_3, penalty_2);
 	let fee_3 = penalty_sum - node_txn[0].output[0].value;
-	feerate_3 = fee_3 * 1000 / node_txn[0].get_weight() as u64;
+	feerate_3 = fee_3 * 1000 / node_txn[0].weight() as u64;
 	// Verify 25% bump heuristic
 	assert!(feerate_3 * 100 >= feerate_2 * 125);
 	node_txn.clear();
@@ -7777,7 +7868,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 		first = node_txn[4].txid();
 		// Store both feerates for later comparison
 		let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[4].output[0].value;
-		feerate_1 = fee_1 * 1000 / node_txn[4].get_weight() as u64;
+		feerate_1 = fee_1 * 1000 / node_txn[4].weight() as u64;
 		penalty_txn = vec![node_txn[2].clone()];
 		node_txn.clear();
 	}
@@ -7817,7 +7908,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 		// Verify bumped tx is different and 25% bump heuristic
 		assert_ne!(first, node_txn[0].txid());
 		let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[0].output[0].value;
-		let feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
+		let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
 		assert!(feerate_2 * 100 > feerate_1 * 125);
 		let txn = vec![node_txn[0].clone()];
 		node_txn.clear();
@@ -7890,23 +7981,34 @@ fn test_bump_penalty_txn_on_remote_commitment() {
 		assert_eq!(node_txn[6].input.len(), 1);
 		check_spends!(node_txn[0], remote_txn[0]);
 		check_spends!(node_txn[6], remote_txn[0]);
-		assert_eq!(node_txn[0].input[0].previous_output, node_txn[3].input[0].previous_output);
-		preimage_bump = node_txn[3].clone();
 		check_spends!(node_txn[1], chan.3);
 		check_spends!(node_txn[2], node_txn[1]);
-		assert_eq!(node_txn[1], node_txn[4]);
-		assert_eq!(node_txn[2], node_txn[5]);
+
+		if node_txn[0].input[0].previous_output == node_txn[3].input[0].previous_output {
+			preimage_bump = node_txn[3].clone();
+			check_spends!(node_txn[3], remote_txn[0]);
+
+			assert_eq!(node_txn[1], node_txn[4]);
+			assert_eq!(node_txn[2], node_txn[5]);
+		} else {
+			preimage_bump = node_txn[7].clone();
+			check_spends!(node_txn[7], remote_txn[0]);
+			assert_eq!(node_txn[0].input[0].previous_output, node_txn[7].input[0].previous_output);
+
+			assert_eq!(node_txn[1], node_txn[3]);
+			assert_eq!(node_txn[2], node_txn[4]);
+		}
 
 		timeout = node_txn[6].txid();
 		let index = node_txn[6].input[0].previous_output.vout;
 		let fee = remote_txn[0].output[index as usize].value - node_txn[6].output[0].value;
-		feerate_timeout = fee * 1000 / node_txn[6].get_weight() as u64;
+		feerate_timeout = fee * 1000 / node_txn[6].weight() as u64;
 
 		preimage = node_txn[0].txid();
 		let index = node_txn[0].input[0].previous_output.vout;
 		let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
-		feerate_preimage = fee * 1000 / node_txn[0].get_weight() as u64;
+		feerate_preimage = fee * 1000 / node_txn[0].weight() as u64;
 
 		node_txn.clear();
 	};
@@ -7925,13 +8027,13 @@ fn test_bump_penalty_txn_on_remote_commitment() {
 
 	let index = preimage_bump.input[0].previous_output.vout;
 	let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
-	let new_feerate = fee * 1000 / preimage_bump.get_weight() as u64;
+	let new_feerate = fee * 1000 / preimage_bump.weight() as u64;
 	assert!(new_feerate * 100 > feerate_timeout * 125);
 	assert_ne!(timeout, preimage_bump.txid());
 
 	let index = node_txn[0].input[0].previous_output.vout;
 	let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
-	let new_feerate = fee * 1000 / node_txn[0].get_weight() as u64;
+	let new_feerate = fee * 1000 / node_txn[0].weight() as u64;
 	assert!(new_feerate * 100 > feerate_preimage * 125);
 	assert_ne!(preimage, node_txn[0].txid());
 
@@ -8644,10 +8746,11 @@ fn test_update_err_monitor_lockdown() {
 		watchtower
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	let block = Block { header, txdata: vec![] };
 	// Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
 	// transaction lock time requirements here.
-	chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (header, 0));
-	watchtower.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
+	chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 0));
+	watchtower.chain_monitor.block_connected(&block, 200);
 
 	// Try to update ChannelMonitor
 	assert!(nodes[1].node.claim_funds(preimage));
@@ -8705,10 +8808,11 @@ fn test_concurrent_monitor_claim() {
 		watchtower
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	let block = Block { header, txdata: vec![] };
 	// Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
 	// transaction lock time requirements here.
-	chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (header, 0));
-	watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
+	chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (block.clone(), 0));
+	watchtower_alice.chain_monitor.block_connected(&block, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
 	// Watchtower Alice should have broadcast a commitment/HTLC-timeout
 	{
@@ -9509,12 +9613,7 @@ fn test_forwardable_regen() {
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
 }
 
-#[test]
-fn test_dup_htlc_second_fail_panic() {
-	// Previously, if we received two HTLCs back-to-back, where the second overran the expected
-	// value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
-	// Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
-	// HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
+fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -9524,14 +9623,9 @@ fn test_dup_htlc_second_fail_panic() {
 
 	let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
 		.with_features(InvoiceFeatures::known());
-	let scorer = test_utils::TestScorer::with_penalty(0);
-	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-	let route = get_route(
-		&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
-		Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-		10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+	let route = get_route!(nodes[0], payment_params, 10_000, TEST_FINAL_CLTV).unwrap();
 
-	let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
 
 	{
 		nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
@@ -9559,26 +9653,153 @@ fn test_dup_htlc_second_fail_panic() {
 		// the first HTLC delivered above.
 	}
 
-	// Now we go fail back the first HTLC from the user end.
 	expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 	nodes[1].node.process_pending_htlc_forwards();
-	nodes[1].node.fail_htlc_backwards(&our_payment_hash);
-	expect_pending_htlcs_forwardable_ignore!(nodes[1]);
-	nodes[1].node.process_pending_htlc_forwards();
+	if test_for_second_fail_panic {
+		// Now we go fail back the first HTLC from the user end.
+		nodes[1].node.fail_htlc_backwards(&our_payment_hash);
 
-	check_added_monitors!(nodes[1], 1);
-	let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+		nodes[1].node.process_pending_htlc_forwards();
+
+		check_added_monitors!(nodes[1], 1);
+		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
+
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
+		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+		let failure_events = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(failure_events.len(), 2);
+		if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
+		if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+	} else {
+		// Let the second HTLC fail and claim the first
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+		nodes[1].node.process_pending_htlc_forwards();
+
+		check_added_monitors!(nodes[1], 1);
+		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+		expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+		claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
+	}
+}
+
+#[test]
+fn test_dup_htlc_second_fail_panic() {
+	// Previously, if we received two HTLCs back-to-back, where the second overran the expected
+	// value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
+	// Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
+	// HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
+	do_test_dup_htlc_second_rejected(true);
+}
+
+#[test]
+fn test_dup_htlc_second_rejected() {
+	// Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
+	// simply reject the second HTLC but are still able to claim the first HTLC.
+	do_test_dup_htlc_second_rejected(false);
+}
+
+#[test]
+fn test_inconsistent_mpp_params() {
+	// Test that if we receive two HTLCs with different payment parameters we fail back the first
+	// such HTLC and allow the second to stay.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+
+	let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
+		.with_features(InvoiceFeatures::known());
+	let mut route = get_route!(nodes[0], payment_params, 15_000_000, TEST_FINAL_CLTV).unwrap();
+	assert_eq!(route.paths.len(), 2);
+	route.paths.sort_by(|path_a, _| {
+		// Sort the path so that the path through nodes[1] comes first
+		if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+	});
+	let payment_params_opt = Some(payment_params);
+
+	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+
+	let cur_height = nodes[0].best_block_info().1;
+	let payment_id = PaymentId([42; 32]);
+	{
+		nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
+	}
+	assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
+
+	{
+		nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+		nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
+
+		expect_pending_htlcs_forwardable!(nodes[2]);
+		check_added_monitors!(nodes[2], 1);
+
+		let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+		nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[3], 0);
+		commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
+
+		// At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
+		// amount. It will assume the second is a privacy attack (no longer particularly relevant
+		// post-payment_secrets) and fail back the new HTLC.
+	}
+	expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+	nodes[3].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+	nodes[3].node.process_pending_htlc_forwards();
+
+	check_added_monitors!(nodes[3], 1);
+
+	let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
+
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	check_added_monitors!(nodes[2], 1);
+
+	let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
+
+	expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+	nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
 
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
-	commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
-
-	let failure_events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(failure_events.len(), 2);
-	if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
-	if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
 }
 
 #[test]
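
The hunks above repeatedly replace exact-count and fixed-index assertions (for example `assert_eq!(claim_txn.len(), 8)` together with `assert_eq!(claim_txn[1], claim_txn[4])`) with branches over whichever broadcast order the ChannelMonitor actually produced. A minimal, self-contained sketch of that order-agnostic assertion pattern follows; it uses a hypothetical `Tx` type in place of `bitcoin::Transaction`, and all names in it are illustrative rather than taken from the patch:

	#[derive(Clone, Debug, PartialEq)]
	struct Tx { id: u8 }

	/// Return whichever candidate slot duplicates `reference`, mirroring the
	/// `let bump_tx = if claim_txn[1] == claim_txn[4] { .. } else { .. }` selection above.
	fn pick_bump<'a>(reference: &Tx, slot_a: &'a Tx, slot_b: &'a Tx) -> &'a Tx {
		if slot_a == reference { slot_a } else { slot_b }
	}

	fn main() {
		let broadcast = vec![Tx { id: 1 }, Tx { id: 2 }, Tx { id: 1 }];
		// Either remaining slot may hold the rebroadcast copy of broadcast[0];
		// assert on whichever one matches instead of hard-coding its index.
		let bump = pick_bump(&broadcast[0], &broadcast[1], &broadcast[2]);
		assert_eq!(*bump, broadcast[0]);
		// Count assertions are relaxed the same way, as in
		// `assert!(node_txn.len() == 9 || node_txn.len() == 10);` above.
		assert!(broadcast.len() == 3 || broadcast.len() == 4);
	}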