X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=c3174b3931520dc738472d04900825d0d044d278;hb=9e7c02cd125cba991e1b757a5a59c6d4a230280e;hp=e3ae9c7a20f10a51a65ce0a2b3a0e50dac626bd1;hpb=a4c4301730c78e0adbf5810c753ca37b1ba8063b;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e3ae9c7a..c3174b39 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -17,7 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; -use crate::chain::keysinterface::{BaseSign, KeysInterface}; +use crate::chain::keysinterface::{BaseSign, EntropySource, KeysInterface}; use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA}; @@ -30,7 +30,7 @@ use crate::ln::features::{ChannelFeatures, NodeFeatures}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use crate::util::enforcing_trait_impls::EnforcingSigner; -use crate::util::{byte_utils, test_utils}; +use crate::util::test_utils; use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; @@ -56,7 +56,7 @@ use alloc::collections::BTreeSet; use core::default::Default; use core::iter::repeat; use bitcoin::hashes::Hash; -use crate::sync::Mutex; +use crate::sync::{Arc, Mutex}; use crate::ln::functional_test_utils::*; use crate::ln::chan_utils::CommitmentTransaction; @@ -175,8 +175,11 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { } nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel_message); { - let mut lock; - let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id); + let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; + let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; + let mut sender_node_per_peer_lock; + let mut sender_node_peer_state_lock; + let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); chan.holder_selected_channel_reserve_satoshis = 0; chan.holder_max_htlc_value_in_flight_msat = 100_000_000; } @@ -686,16 +689,18 @@ fn test_update_fee_that_funder_cannot_afford() { // Get the EnforcingSigner for each channel, which will be used to (1) get the keys // needed to sign the new commitment tx and (2) sign the new commitment tx. 
let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = { - let chan_lock = nodes[1].node.channel_state.lock().unwrap(); - let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -705,11 +710,12 @@ fn test_update_fee_that_funder_cannot_afford() { // Assemble the set of keys we can use for signatures for our commitment_signed message. let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint, - &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); + &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint); let res = { - let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap(); let local_chan_signer = local_chan.get_signer(); let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( @@ -834,7 +840,7 @@ fn test_update_fee_with_fundee_update_add_htlc() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { .. } => { }, + Event::PaymentClaimable { .. } => { }, _ => panic!("Unexpected event"), }; @@ -954,8 +960,8 @@ fn test_update_fee() { check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30); - assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); @@ -1179,7 +1185,7 @@ fn holding_cell_htlc_counting() { assert_eq!(events.len(), payments.len()); for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) { match event { - &Event::PaymentReceived { ref payment_hash, .. } => { + &Event::PaymentClaimable { ref payment_hash, .. 
} => { assert_eq!(*payment_hash, *hash); }, _ => panic!("Unexpected event"), @@ -1268,19 +1274,16 @@ fn test_duplicate_htlc_different_direction_onchain() { connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(claim_txn.len(), 5); + assert_eq!(claim_txn.len(), 3); check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage - check_spends!(claim_txn[1], chan_1.3); // Alternative commitment tx - check_spends!(claim_txn[2], claim_txn[1]); // HTLC spend in alternative commitment tx - - check_spends!(claim_txn[3], remote_txn[0]); - check_spends!(claim_txn[4], remote_txn[0]); + check_spends!(claim_txn[1], remote_txn[0]); + check_spends!(claim_txn[2], remote_txn[0]); let preimage_tx = &claim_txn[0]; - let (preimage_bump_tx, timeout_tx) = if claim_txn[3].input[0].previous_output == preimage_tx.input[0].previous_output { - (&claim_txn[3], &claim_txn[4]) + let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output { + (&claim_txn[1], &claim_txn[2]) } else { - (&claim_txn[4], &claim_txn[3]) + (&claim_txn[2], &claim_txn[1]) }; assert_eq!(preimage_tx.input.len(), 1); @@ -1324,11 +1327,11 @@ fn test_basic_channel_reserve() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1, get_opt_anchors!(nodes[0], chan.2)); + let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2)); let max_can_send = 5000000 - channel_reserve - commit_tx_fee; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1); let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap(); @@ -1381,15 +1384,16 @@ fn test_fee_spike_violation_fails_htlc() { // nodes[0] just sent. In the code for construction of this message, "local" refers // to the sender of the message, and "remote" refers to the receiver. - let feerate_per_kw = get_feerate!(nodes[0], chan.2); + let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; // Get the EnforcingSigner for each channel, which will be used to (1) get the keys // needed to sign the new commitment tx and (2) sign the new commitment tx. 
let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); // Make the signer believe we validated another commitment, so we can release the secret chan_signer.get_enforcement_state().last_holder_commitment -= 1; @@ -1401,8 +1405,9 @@ fn test_fee_spike_violation_fails_htlc() { chan_signer.pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { - let chan_lock = nodes[1].node.channel_state.lock().unwrap(); - let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -1412,7 +1417,7 @@ fn test_fee_spike_violation_fails_htlc() { // Assemble the set of keys we can use for signatures for our commitment_signed message. let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint, - &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); + &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint); // Build the remote commitment transaction so we can sign it, and then later use the // signature for the commitment_signed message. @@ -1429,8 +1434,9 @@ fn test_fee_spike_violation_fails_htlc() { let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; let res = { - let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap(); let local_chan_signer = local_chan.get_signer(); let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( commitment_number, @@ -1680,9 +1686,9 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let feemsat = 239; let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // Add a 2* and +1 for the fee spike reserve. 
let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors); @@ -1771,11 +1777,11 @@ fn test_channel_reserve_holding_cell_htlcs() { let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001, channelmanager::provided_init_features(), channelmanager::provided_init_features()); let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2); - let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2); + let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); - let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2); - let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2); + let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2); + let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); macro_rules! expect_forward { ($node: expr) => {{ @@ -1789,8 +1795,8 @@ fn test_channel_reserve_holding_cell_htlcs() { let feemsat = 239; // set above let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let feerate = get_feerate!(nodes[0], chan_1.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan_1.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_1.2); let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; @@ -1828,10 +1834,10 @@ fn test_channel_reserve_holding_cell_htlcs() { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); let (stat01_, stat11_, stat12_, stat22_) = ( - get_channel_value_stat!(nodes[0], chan_1.2), - get_channel_value_stat!(nodes[1], chan_1.2), - get_channel_value_stat!(nodes[1], chan_2.2), - get_channel_value_stat!(nodes[2], chan_2.2), + get_channel_value_stat!(nodes[0], nodes[1], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[0], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[2], chan_2.2), + get_channel_value_stat!(nodes[2], nodes[1], chan_2.2), ); assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat); @@ -1882,7 +1888,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2; let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; { - let stat = get_channel_value_stat!(nodes[0], chan_1.2); + let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat); } @@ -1936,7 +1942,7 @@ fn test_channel_reserve_holding_cell_htlcs() { commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); - expect_payment_received!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1); + expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1); // flush the htlcs in the holding cell assert_eq!(commitment_update_2.update_add_htlcs.len(), 2); @@ -1956,9 +1962,11 @@ fn test_channel_reserve_holding_cell_htlcs() { let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } 
=> { + Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => { assert_eq!(our_payment_hash_21, *payment_hash); assert_eq!(recv_value_21, amount_msat); + assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); + assert_eq!(via_channel_id, Some(chan_2.2)); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -1970,9 +1978,11 @@ fn test_channel_reserve_holding_cell_htlcs() { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { + Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => { assert_eq!(our_payment_hash_22, *payment_hash); assert_eq!(recv_value_22, amount_msat); + assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); + assert_eq!(via_channel_id, Some(chan_2.2)); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -1994,11 +2004,11 @@ fn test_channel_reserve_holding_cell_htlcs() { let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); - let stat0 = get_channel_value_stat!(nodes[0], chan_1.2); + let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); assert_eq!(stat0.value_to_self_msat, expected_value_to_self); assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); - let stat2 = get_channel_value_stat!(nodes[2], chan_2.2); + let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3); } @@ -2032,7 +2042,7 @@ fn channel_reserve_in_flight_removes() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2); + let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); // Route the first two HTLCs. let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); @@ -2102,7 +2112,7 @@ fn channel_reserve_in_flight_removes() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 100000); + expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000); // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't // resolve the second HTLC from A's point of view. 
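// Illustrative sketch only (names prefixed `expected_` are placeholders, everything else is taken
// from the hunks above): the event shape these tests migrate to. `Event::PaymentReceived` is now
// `Event::PaymentClaimable` and additionally carries `receiver_node_id`, `via_channel_id` and
// `via_user_channel_id`, which the updated assertions check against the receiving node and channel.
match events[0] {
	Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
		assert_eq!(*payment_hash, expected_payment_hash);
		assert_eq!(amount_msat, expected_amount_msat);
		assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
		assert_eq!(via_channel_id, Some(chan.2));
		match purpose {
			PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
				assert!(payment_preimage.is_none());
				assert_eq!(*payment_secret, expected_payment_secret);
			},
			_ => panic!("Unexpected purpose"),
		}
	},
	_ => panic!("Unexpected event"),
}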
@@ -2149,7 +2159,7 @@ fn channel_reserve_in_flight_removes() { check_added_monitors!(nodes[0], 1); expect_pending_htlcs_forwardable!(nodes[0]); - expect_payment_received!(nodes[0], payment_hash_4, payment_secret_4, 10000); + expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000); claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); @@ -2192,7 +2202,7 @@ fn channel_monitor_network_test() { assert_eq!(node_txn.len(), 1); mine_transaction(&nodes[0], &node_txn[0]); check_added_monitors!(nodes[0], 1); - test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE); + test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE); } check_closed_broadcast!(nodes[0], true); assert_eq!(nodes[0].node.list_channels().len(), 0); @@ -2214,7 +2224,7 @@ fn channel_monitor_network_test() { test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); mine_transaction(&nodes[2], &node_txn[0]); check_added_monitors!(nodes[2], 1); - test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE); + test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE); } check_closed_broadcast!(nodes[2], true); assert_eq!(nodes[1].node.list_channels().len(), 0); @@ -2382,16 +2392,15 @@ fn test_justice_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment tx + assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output check_spends!(node_txn[0], revoked_local_txn[0]); node_txn.swap_remove(0); - node_txn.truncate(1); } check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE); + test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE); mine_transaction(&nodes[0], &revoked_local_txn[0]); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires @@ -2430,14 +2439,14 @@ fn test_justice_tx() { mine_transaction(&nodes[0], &revoked_local_txn[0]); { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 2); //ChannelMonitor: penalty tx, ChannelManager: local commitment tx + assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output check_spends!(node_txn[0], revoked_local_txn[0]); node_txn.swap_remove(0); } check_added_monitors!(nodes[0], 1); - test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE); + test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); @@ -2474,10 +2483,9 @@ fn revoked_output_claim() { check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx + assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output check_spends!(node_txn[0], revoked_local_txn[0]); 
- check_spends!(node_txn[1], chan_1.3); // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -2528,7 +2536,7 @@ fn claim_htlc_outputs_shared_tx() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment + assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs check_spends!(node_txn[0], revoked_local_txn[0]); @@ -2542,10 +2550,6 @@ fn claim_htlc_outputs_shared_tx() { assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC - // Next nodes[1] broadcasts its current local tx state: - assert_eq!(node_txn[1].input.len(), 1); - check_spends!(node_txn[1], chan_1.3); - // Finally, mine the penalty transaction and check that we get an HTLC failure after // ANTI_REORG_DELAY confirmations. mine_transaction(&nodes[1], &node_txn[0]); @@ -2598,7 +2602,7 @@ fn claim_htlc_outputs_single_tx() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert!(node_txn.len() == 9 || node_txn.len() == 10); + assert_eq!(node_txn.len(), 7); // Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration assert_eq!(node_txn[0].input.len(), 1); @@ -2608,7 +2612,7 @@ fn claim_htlc_outputs_single_tx() { assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output check_spends!(node_txn[1], node_txn[0]); - // Justice transactions are indices 1-2-4 + // Justice transactions are indices 2-3-4 assert_eq!(node_txn[2].input.len(), 1); assert_eq!(node_txn[3].input.len(), 1); assert_eq!(node_txn[4].input.len(), 1); @@ -2697,11 +2701,8 @@ fn test_htlc_on_chain_success() { check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx) - assert_eq!(node_txn.len(), 5); - assert_eq!(node_txn[0], node_txn[3]); - assert_eq!(node_txn[1], node_txn[4]); - assert_eq!(node_txn[2], commitment_tx[0]); + let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx) + assert_eq!(node_txn.len(), 2); check_spends!(node_txn[0], commitment_tx[0]); check_spends!(node_txn[1], commitment_tx[0]); assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); @@ -2713,7 +2714,7 @@ fn test_htlc_on_chain_success() { // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; - connect_block(&nodes[1], &Block { header, txdata: node_txn}); + connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]}); connect_blocks(&nodes[1], 
TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); @@ -2775,35 +2776,31 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), }; macro_rules! check_tx_local_broadcast { - ($node: expr, $htlc_offered: expr, $commitment_tx: expr, $chan_tx: expr) => { { + ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { { let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 3); - // Node[1]: ChannelManager: 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 (timeout tx) - // Node[0]: ChannelManager: 3 (commtiemtn tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 HTLC-timeout + assert_eq!(node_txn.len(), 2); + // Node[1]: 2 * HTLC-timeout tx + // Node[0]: 2 * HTLC-timeout tx + check_spends!(node_txn[0], $commitment_tx); check_spends!(node_txn[1], $commitment_tx); - check_spends!(node_txn[2], $commitment_tx); + assert_ne!(node_txn[0].lock_time.0, 0); assert_ne!(node_txn[1].lock_time.0, 0); - assert_ne!(node_txn[2].lock_time.0, 0); if $htlc_offered { + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output - assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output } else { + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment - assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment } - check_spends!(node_txn[0], $chan_tx); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71); node_txn.clear(); } } } - // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate - // commitment transaction with a corresponding HTLC-Timeout transactions, as well as a - // timeout-claim of the output that nodes[2] just claimed via success. - check_tx_local_broadcast!(nodes[1], false, commitment_tx[0], chan_2.3); + // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success. 
+ check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]); // Broadcast legit commitment tx from A on B's chain // Broadcast preimage tx by B on offered output from A commitment tx on A's chain @@ -2814,18 +2811,24 @@ fn test_htlc_on_chain_success() { check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Sucess * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn) + assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn let commitment_spend = - if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() { - check_spends!(node_txn[1], commitment_tx[0]); - check_spends!(node_txn[2], commitment_tx[0]); - assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout); + if node_txn.len() == 1 { &node_txn[0] } else { - check_spends!(node_txn[0], commitment_tx[0]); - check_spends!(node_txn[1], commitment_tx[0]); - assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout); - &node_txn[2] + // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast. + // FullBlockViaListen + if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() { + check_spends!(node_txn[1], commitment_tx[0]); + check_spends!(node_txn[2], commitment_tx[0]); + assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout); + &node_txn[0] + } else { + check_spends!(node_txn[0], commitment_tx[0]); + check_spends!(node_txn[1], commitment_tx[0]); + assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout); + &node_txn[2] + } }; check_spends!(commitment_spend, node_a_commitment_tx[0]); @@ -2834,10 +2837,6 @@ fn test_htlc_on_chain_success() { assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert_eq!(commitment_spend.lock_time.0, 0); assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment - check_spends!(node_txn[3], chan_1.3); - assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71); - check_spends!(node_txn[4], node_txn[3]); - check_spends!(node_txn[5], node_txn[3]); // We don't bother to check that B can claim the HTLC output on its commitment tx here as // we already checked the same situation with A. 
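// Illustrative sketch of the per-channel lookup pattern these hunks migrate to, shown once in
// full for reference (it is used again below for the dust-limit reads): channels now live under
// `per_peer_state`, keyed by the counterparty's node id, instead of the old `channel_state.by_id`
// map. Assumes `chan.2` is the id of a channel between nodes[0] and nodes[1], as in these tests.
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
let dust_limit = channel.holder_dust_limit_satoshis;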
@@ -2866,7 +2865,7 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), } } - check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0], chan_1.3); + check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]); } fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { @@ -2920,10 +2919,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx) - assert_eq!(node_txn.len(), 1); - check_spends!(node_txn[0], chan_2.3); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71); + let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + assert_eq!(node_txn.len(), 0); // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence @@ -2933,9 +2930,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let timeout_tx; { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 5); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (local commitment tx + HTLC-timeout), 1 timeout tx - assert_eq!(node_txn[0], node_txn[3]); - assert_eq!(node_txn[1], node_txn[4]); + assert_eq!(node_txn.len(), 3); // 2 (local commitment tx + HTLC-timeout), 1 timeout tx check_spends!(node_txn[2], commitment_tx[0]); assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); @@ -2980,12 +2975,10 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx - assert_eq!(node_txn.len(), 2); - check_spends!(node_txn[0], chan_1.3); - assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71); - check_spends!(node_txn[1], commitment_tx[0]); - assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx + assert_eq!(node_txn.len(), 1); + check_spends!(node_txn[0], commitment_tx[0]); + assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); } #[test] @@ -3078,7 +3071,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let value = if use_dust { // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as // well, so HTLCs at exactly the dust limit will not be included in commitment txn. 
- nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 + nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 } else { 3000000 }; let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); @@ -3370,6 +3364,12 @@ fn test_htlc_ignore_latest_remote_commitment() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen { + // We rely on the ability to connect a block redundantly, which isn't allowed via + // `chain::Listen`, so we never run the test if we randomly get assigned that + // connect_style. + return; + } create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); route_payment(&nodes[0], &[&nodes[1]], 10000000); @@ -3391,7 +3391,6 @@ fn test_htlc_ignore_latest_remote_commitment() { // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions - header.prev_blockhash = header.block_hash(); connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]}); } @@ -3612,15 +3611,16 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let mut as_channel_ready = None; - if messages_delivered == 0 { - let (channel_ready, _, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features()); + let channel_id = if messages_delivered == 0 { + let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features()); as_channel_ready = Some(channel_ready); // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect) // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver // it before the channel_reestablish message. + chan_id } else { - create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - } + create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2 + }; let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); @@ -3723,9 +3723,11 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_2 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { + Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); + assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); + assert_eq!(via_channel_id, Some(channel_id)); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. 
} => { assert!(payment_preimage.is_none()); @@ -4007,7 +4009,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, .. } => { + Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => { assert_eq!(payment_hash_2, *payment_hash); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { @@ -4050,7 +4052,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); // Now do the relevant commitment_signed/RAA dances along the path, noting that the final - // hop should *not* yet generate any PaymentReceived event(s). + // hop should *not* yet generate any PaymentClaimable event(s). pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); our_payment_hash } else { @@ -4082,8 +4084,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); // 100_000 msat as u64, followed by the height at which we failed back above - let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec(); - expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(block_count - 1)); + let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec(); + expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes()); expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]); } @@ -4310,12 +4312,10 @@ fn test_static_spendable_outputs_preimage_tx() { } // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx - assert_eq!(node_txn.len(), 3); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx + assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], commitment_tx[0]); assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - check_spends!(node_txn[1], chan_1.3); - check_spends!(node_txn[2], node_txn[1]); mine_transaction(&nodes[1], &node_txn[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); @@ -4357,12 +4357,11 @@ fn test_static_spendable_outputs_timeout_tx() { // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(node_txn.len(), 2); // ChannelManager : 1 local commitent tx, ChannelMonitor: timeout tx - check_spends!(node_txn[0], chan_1.3.clone()); - check_spends!(node_txn[1], commitment_tx[0].clone()); - assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - mine_transaction(&nodes[1], &node_txn[1]); + mine_transaction(&nodes[1], 
&node_txn[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], our_payment_hash, false); @@ -4370,8 +4369,8 @@ fn test_static_spendable_outputs_timeout_tx() { let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output check_spends!(spend_txn[0], commitment_tx[0]); - check_spends!(spend_txn[1], node_txn[1]); - check_spends!(spend_txn[2], node_txn[1], commitment_tx[0]); // All outputs + check_spends!(spend_txn[1], node_txn[0]); + check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs } #[test] @@ -4397,7 +4396,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 2); + assert_eq!(node_txn.len(), 1); assert_eq!(node_txn[0].input.len(), 2); check_spends!(node_txn[0], revoked_local_txn[0]); @@ -4435,40 +4434,36 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(revoked_htlc_txn.len(), 2); - check_spends!(revoked_htlc_txn[0], chan_1.3); - assert_eq!(revoked_htlc_txn[1].input.len(), 1); - assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - check_spends!(revoked_htlc_txn[1], revoked_local_txn[0]); - assert_ne!(revoked_htlc_txn[1].lock_time.0, 0); // HTLC-Timeout + assert_eq!(revoked_htlc_txn.len(), 1); + assert_eq!(revoked_htlc_txn[0].input.len(), 1); + assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); + assert_ne!(revoked_htlc_txn[0].lock_time.0, 0); // HTLC-Timeout // B will generate justice tx from A's revoked commitment/HTLC tx let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] }); + connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx + assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0] // including the one already spent by revoked_htlc_txn[1]. That's OK, we'll spend with valid // transactions next... 
assert_eq!(node_txn[0].input.len(), 3); - check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[1]); + check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]); assert_eq!(node_txn[1].input.len(), 2); - check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[1]); - if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[1].txid() { - assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output); + check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]); + if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() { + assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output); } else { - assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[1].txid()); - assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[1].input[0].previous_output); + assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid()); + assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output); } - assert_eq!(node_txn[2].input.len(), 1); - check_spends!(node_txn[2], chan_1.3); - mine_transaction(&nodes[1], &node_txn[1]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -4507,7 +4502,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(revoked_htlc_txn.len(), 2); + assert_eq!(revoked_htlc_txn.len(), 1); assert_eq!(revoked_htlc_txn[0].input.len(), 1); assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); @@ -4524,7 +4519,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx + assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0] // including the one already spent by revoked_htlc_txn[0]. 
That's OK, we'll spend with valid @@ -4541,8 +4536,6 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { assert_eq!(node_txn[1].input.len(), 1); check_spends!(node_txn[1], revoked_htlc_txn[0]); - check_spends!(node_txn[2], chan_1.3); - mine_transaction(&nodes[0], &node_txn[1]); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); @@ -4605,20 +4598,16 @@ fn test_onchain_to_onchain_claim() { check_added_monitors!(nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); - let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx) - assert_eq!(c_txn.len(), 3); - assert_eq!(c_txn[0], c_txn[2]); - assert_eq!(commitment_tx[0], c_txn[1]); - check_spends!(c_txn[1], chan_2.3); - check_spends!(c_txn[2], c_txn[1]); - assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71); - assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx) + assert_eq!(c_txn.len(), 1); + check_spends!(c_txn[0], commitment_tx[0]); + assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output assert_eq!(c_txn[0].lock_time.0, 0); // Success tx // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; - connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}); + connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), c_txn[0].clone()]}); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -4635,13 +4624,6 @@ fn test_onchain_to_onchain_claim() { }, _ => panic!("Unexpected event"), } - { - let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - // ChannelMonitor: claim tx - assert_eq!(b_txn.len(), 1); - check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager - b_txn.clear(); - } check_added_monitors!(nodes[1], 1); let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); @@ -4668,10 +4650,8 @@ fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[1], &commitment_tx[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx - assert_eq!(b_txn.len(), 3); - check_spends!(b_txn[1], chan_1.3); - check_spends!(b_txn[2], b_txn[1]); + // ChannelMonitor: HTLC-Success tx + assert_eq!(b_txn.len(), 1); check_spends!(b_txn[0], commitment_tx[0]); assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment @@ -4731,31 +4711,30 @@ fn test_duplicate_payment_hash_one_failure_one_success() { let htlc_timeout_tx; { // Extract one of the two HTLC-Timeout transaction let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - 
// ChannelMonitor: timeout tx * 2-or-3, ChannelManager: local commitment tx - assert!(node_txn.len() == 4 || node_txn.len() == 3); - check_spends!(node_txn[0], chan_2.3); + // ChannelMonitor: timeout tx * 2-or-3 + assert!(node_txn.len() == 2 || node_txn.len() == 3); - check_spends!(node_txn[1], commitment_txn[0]); - assert_eq!(node_txn[1].input.len(), 1); + check_spends!(node_txn[0], commitment_txn[0]); + assert_eq!(node_txn[0].input.len(), 1); - if node_txn.len() > 3 { - check_spends!(node_txn[2], commitment_txn[0]); - assert_eq!(node_txn[2].input.len(), 1); - assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output); + if node_txn.len() > 2 { + check_spends!(node_txn[1], commitment_txn[0]); + assert_eq!(node_txn[1].input.len(), 1); + assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - check_spends!(node_txn[3], commitment_txn[0]); - assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output); - } else { check_spends!(node_txn[2], commitment_txn[0]); - assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output); + assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output); + } else { + check_spends!(node_txn[1], commitment_txn[0]); + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); } + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - if node_txn.len() > 3 { - assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + if node_txn.len() > 2 { + assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); } - htlc_timeout_tx = node_txn[1].clone(); + htlc_timeout_tx = node_txn[0].clone(); } nodes[2].node.claim_funds(our_payment_preimage); @@ -4774,7 +4753,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { _ => panic!("Unexepected event"), } let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(htlc_success_txn.len(), 5); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs), ChannelManager: local commitment tx + HTLC-Success txn (*2 due to 2-HTLC outputs) + assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs) check_spends!(htlc_success_txn[0], commitment_txn[0]); check_spends!(htlc_success_txn[1], commitment_txn[0]); assert_eq!(htlc_success_txn[0].input.len(), 1); @@ -4782,10 +4761,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { assert_eq!(htlc_success_txn[1].input.len(), 1); assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output); - assert_eq!(htlc_success_txn[2], commitment_txn[0]); - assert_eq!(htlc_success_txn[3], htlc_success_txn[0]); - assert_eq!(htlc_success_txn[4], htlc_success_txn[1]); - assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_timeout_tx.input[0].previous_output); + assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output); mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -4808,7 +4784,7 @@ fn 
test_duplicate_payment_hash_one_failure_one_success() { // Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C // Note that the fee paid is effectively double as the HTLC value (including the nodes[1] fee // and nodes[2] fee) is rounded down and then claimed in full. - mine_transaction(&nodes[1], &htlc_success_txn[0]); + mine_transaction(&nodes[1], &htlc_success_txn[1]); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196*2), true, true); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -4866,9 +4842,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { } let node_tx = { let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 3); - assert_eq!(node_txn[0], node_txn[2]); - assert_eq!(node_txn[1], local_txn[0]); + assert_eq!(node_txn.len(), 1); assert_eq!(node_txn[0].input.len(), 1); assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); check_spends!(node_txn[0], local_txn[0]); @@ -4920,7 +4894,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); - let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis; + let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis; // 0th HTLC: let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 1st HTLC: @@ -5213,12 +5188,11 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 2); - check_spends!(node_txn[0], chan_1.3); - assert_eq!(node_txn[1].input.len(), 1); - assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - check_spends!(node_txn[1], local_txn[0]); - node_txn[1].clone() + assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[0], local_txn[0]); + node_txn[0].clone() }; mine_transaction(&nodes[0], &htlc_timeout); @@ -5240,9 +5214,10 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { #[test] fn test_key_derivation_params() { - // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with - // a key manager rotation to test that key_derivation_params returned in DynamicOutputP2WSH - // let us re-derive the channel key set to then derive a delayed_payment_key. + // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key + // manager rotation to test that `channel_keys_id` returned in + // [`SpendableOutputDescriptor::DelayedPaymentOutput`] let us re-derive the channel key set to + // then derive a `delayed_payment_key`. 
let chanmon_cfgs = create_chanmon_cfgs(3); @@ -5250,8 +5225,9 @@ fn test_key_derivation_params() { let seed = [42; 32]; let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager); - let network_graph = NetworkGraph::new(chanmon_cfgs[0].chain_source.genesis_hash, &chanmon_cfgs[0].logger); - let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, features: channelmanager::provided_init_features() }; + let network_graph = Arc::new(NetworkGraph::new(chanmon_cfgs[0].chain_source.genesis_hash, &chanmon_cfgs[0].logger)); + let router = test_utils::TestRouter::new(network_graph.clone()); + let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, features: channelmanager::provided_init_features() }; let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); node_cfgs.remove(0); node_cfgs.insert(0, node); @@ -5297,10 +5273,11 @@ fn test_key_derivation_params() { let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn[1].input.len(), 1); - assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - check_spends!(node_txn[1], local_txn_1[0]); - node_txn[1].clone() + assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[0], local_txn_1[0]); + node_txn[0].clone() }; mine_transaction(&nodes[0], &htlc_timeout); @@ -5627,10 +5604,10 @@ fn test_fail_holding_cell_htlc_upon_free() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); @@ -5638,7 +5615,7 @@ fn test_fail_holding_cell_htlc_upon_free() { // Send a payment which passes reserve checks but gets stuck in the holding cell. nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. 
@@ -5651,7 +5628,7 @@ fn test_fail_holding_cell_htlc_upon_free() { // Upon receipt of the RAA, there will be an attempt to resend the holding cell // HTLC, but now that the fee has been raised the payment will now fail, causing // us to surface its failure to the user. - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1); let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", @@ -5705,10 +5682,10 @@ fn test_free_and_fail_holding_cell_htlcs() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. let amt_1 = 20000; @@ -5718,11 +5695,11 @@ fn test_free_and_fail_holding_cell_htlcs() { // Send 2 payments which pass reserve checks but get stuck in the holding cell. nodes[0].node.send_payment(&route_1, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1); let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes()); nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), payment_id_2).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); // Flush the pending fee update. @@ -5736,7 +5713,7 @@ fn test_free_and_fail_holding_cell_htlcs() { // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, // but now that the fee has been raised the second payment will now fail, causing us // to surface its failure to the user. The first payment should succeed. - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1); let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", @@ -5782,7 +5759,7 @@ fn test_free_and_fail_holding_cell_htlcs() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { .. } => {}, + Event::PaymentClaimable { .. 
} => {}, _ => panic!("Unexpected event"), } nodes[1].node.claim_funds(payment_preimage_1); @@ -5832,10 +5809,10 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan_0_1.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan_0_1.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2); // Send a payment which passes reserve checks but gets stuck in the holding cell. let feemsat = 239; @@ -5856,7 +5833,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2); + chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. @@ -6005,7 +5982,7 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { .with_features(channelmanager::provided_invoice_features()); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000, 0); route.paths[0].last_mut().unwrap().cltv_expiry_delta = 500000001; - unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::RouteError { ref err }, + unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Channel CLTV overflowed?")); } @@ -6019,7 +5996,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; + let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; for i in 0..max_accepted_htlcs { let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); @@ -6041,7 +6019,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000); + expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); } let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, 
APIError::ChannelUnavailable { ref err }, @@ -6060,7 +6038,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_value = 100000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat; + let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat; send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight); @@ -6088,8 +6066,9 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, channelmanager::provided_init_features(), channelmanager::provided_init_features()); let htlc_minimum_msat: u64; { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let channel = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); htlc_minimum_msat = channel.get_holder_htlc_minimum_msat(); } @@ -6115,10 +6094,10 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // The 2* and +1 are for the fee spike reserve. 
let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); @@ -6194,7 +6173,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; + updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); @@ -6667,7 +6646,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan =create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; // We route 2 dust-HTLCs between A and B let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); @@ -6758,7 +6738,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -6784,7 +6765,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); - timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone()); + timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone()); assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); @@ -6956,8 +6937,8 @@ fn test_check_htlc_underpaying() { commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32 - let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec(); - expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(CHAN_CONFIRM_DEPTH)); + let mut expected_failure_data = (10_000 as 
u64).to_be_bytes().to_vec(); + expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes()); expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]); } @@ -7089,7 +7070,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() { let feerate_1; { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 2); // justice tx (broadcasted from ChannelMonitor) + local commitment tx + assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor) assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs assert_eq!(node_txn[0].output.len(), 1); check_spends!(node_txn[0], revoked_txn[0]); @@ -7189,24 +7170,23 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(revoked_htlc_txn.len(), 3); - check_spends!(revoked_htlc_txn[1], chan.3); + assert_eq!(revoked_htlc_txn.len(), 2); assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); assert_eq!(revoked_htlc_txn[0].input.len(), 1); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); - assert_eq!(revoked_htlc_txn[2].input.len(), 1); - assert_eq!(revoked_htlc_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert_eq!(revoked_htlc_txn[2].output.len(), 1); - check_spends!(revoked_htlc_txn[2], revoked_local_txn[0]); + assert_eq!(revoked_htlc_txn[1].input.len(), 1); + assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!(revoked_htlc_txn[1].output.len(), 1); + check_spends!(revoked_htlc_txn[1], revoked_local_txn[0]); // Broadcast set of revoked txn on A let hash_128 = connect_blocks(&nodes[0], 40); let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] }); let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] }); + connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }); let events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); match events.last().unwrap() { @@ -7218,7 +7198,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { let penalty_txn; { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 5); // 3 penalty txn on revoked commitment tx + A commitment tx + 1 penalty tnx on revoked HTLC txn + assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn // Verify claim tx are spending revoked HTLC txn // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0] assert_eq!(node_txn[0].input.len(), 1); assert_eq!(node_txn[1].input.len(), 1); assert_eq!(node_txn[2].input.len(), 1); assert_eq!(node_txn[0].output.len(), 1); assert_eq!(node_txn[1].output.len(), 1); assert_eq!(node_txn[2].output.len(), 1); check_spends!(node_txn[0], revoked_local_txn[0]); check_spends!(node_txn[1], revoked_local_txn[0]); check_spends!(node_txn[2], revoked_local_txn[0]); assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output); assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output); assert_eq!(node_txn[0].input[0].previous_output,
revoked_htlc_txn[0].input[0].previous_output); - assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output); + assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output); - // node_txn[3] is the local commitment tx broadcast just because (and somewhat in case of - // reorgs, though its not clear its ever worth broadcasting conflicting txn like this when - // a remote commitment tx has already been confirmed). - check_spends!(node_txn[3], chan.3); - - // node_txn[4] spends the revoked outputs from the revoked_htlc_txn (which only have one + // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one // output, checked above). - assert_eq!(node_txn[4].input.len(), 2); - assert_eq!(node_txn[4].output.len(), 1); - check_spends!(node_txn[4], revoked_htlc_txn[0], revoked_htlc_txn[2]); + assert_eq!(node_txn[3].input.len(), 2); + assert_eq!(node_txn[3].output.len(), 1); + check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]); - first = node_txn[4].txid(); + first = node_txn[3].txid(); // Store both feerates for later comparison - let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[4].output[0].value; - feerate_1 = fee_1 * 1000 / node_txn[4].weight() as u64; + let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value; + feerate_1 = fee_1 * 1000 / node_txn[3].weight() as u64; penalty_txn = vec![node_txn[2].clone()]; node_txn.clear(); } @@ -7276,10 +7251,10 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { assert_eq!(node_txn.len(), 1); assert_eq!(node_txn[0].input.len(), 2); - check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[2]); + check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]); // Verify bumped tx is different and 25% bump heuristic assert_ne!(first, node_txn[0].txid()); - let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[0].output[0].value; + let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value; let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64; assert!(feerate_2 * 100 > feerate_1 * 125); let txn = vec![node_txn[0].clone()]; @@ -7344,29 +7319,25 @@ fn test_bump_penalty_txn_on_remote_commitment() { let feerate_preimage; { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - // 5 transactions including: - // local commitment + HTLC-Success + // 3 transactions including: // preimage and timeout sweeps from remote commitment + preimage sweep bump - assert_eq!(node_txn.len(), 5); + assert_eq!(node_txn.len(), 3); assert_eq!(node_txn[0].input.len(), 1); - assert_eq!(node_txn[3].input.len(), 1); - assert_eq!(node_txn[4].input.len(), 1); + assert_eq!(node_txn[1].input.len(), 1); + assert_eq!(node_txn[2].input.len(), 1); check_spends!(node_txn[0], remote_txn[0]); - check_spends!(node_txn[3], remote_txn[0]); - check_spends!(node_txn[4], remote_txn[0]); - - check_spends!(node_txn[1], chan.3); // local commitment - check_spends!(node_txn[2], node_txn[1]); // local HTLC-Success + check_spends!(node_txn[1], remote_txn[0]); + check_spends!(node_txn[2], remote_txn[0]); preimage = node_txn[0].txid(); let index = node_txn[0].input[0].previous_output.vout; let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value; feerate_preimage = fee * 1000 / node_txn[0].weight() as u64; - let (preimage_bump_tx, 
timeout_tx) = if node_txn[3].input[0].previous_output == node_txn[0].input[0].previous_output { - (node_txn[3].clone(), node_txn[4].clone()) + let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output { + (node_txn[2].clone(), node_txn[1].clone()) } else { - (node_txn[4].clone(), node_txn[3].clone()) + (node_txn[1].clone(), node_txn[2].clone()) }; preimage_bump = preimage_bump_tx; @@ -7431,8 +7402,9 @@ fn test_counterparty_raa_skip_no_crash() { let per_commitment_secret; let next_per_commitment_point; { - let mut guard = nodes[0].node.channel_state.lock().unwrap(); - let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer(); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -7489,7 +7461,7 @@ fn test_bump_txn_sanitize_tracking_maps() { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let penalty_txn = { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx + assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3 check_spends!(node_txn[0], revoked_local_txn[0]); check_spends!(node_txn[1], revoked_local_txn[0]); check_spends!(node_txn[2], revoked_local_txn[0]); @@ -7790,8 +7762,9 @@ fn test_reject_funding_before_inbound_channel_accepted() { // `handle_accept_channel`, which is required in order for `create_funding_transaction` to // succeed when `nodes[0]` is passed to it. let accept_chan_msg = { - let mut lock; - let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id); + let mut node_1_per_peer_lock; + let mut node_1_peer_state_lock; + let channel = get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id); channel.get_accept_channel_message() }; nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_chan_msg); @@ -7877,7 +7850,7 @@ fn test_can_not_accept_unknown_inbound_channel() { let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0); match api_res { Err(APIError::ChannelUnavailable { err }) => { - assert_eq!(err, "Can't accept a channel that doesn't exist"); + assert_eq!(err, format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(unknown_channel_id), nodes[1].node.get_our_node_id())); }, Ok(_) => panic!("It shouldn't be possible to accept an unkown channel"), Err(_) => panic!("Unexpected Error"), @@ -7936,7 +7909,7 @@ fn test_preimage_storage() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { ref purpose, .. } => { + Event::PaymentClaimable { ref purpose, .. } => { match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, .. } => { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap()); @@ -8006,7 +7979,7 @@ fn test_secret_timeout() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. 
} => { + Event::PaymentClaimable { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret, our_payment_secret); // We don't actually have the payment preimage with which to claim this payment! @@ -8113,7 +8086,7 @@ fn test_update_err_monitor_lockdown() { let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1; + &mut io::Cursor::new(&w.0), nodes[0].keys_manager).unwrap().1; assert!(new_monitor == *monitor); let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed); @@ -8134,12 +8107,15 @@ fn test_update_err_monitor_lockdown() { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { + { + let mut node_0_per_peer_lock; + let mut node_0_peer_state_lock; + let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2); if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { assert_eq!(watchtower.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure); assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } - } else { assert!(false); }; + } // Our local monitor is in-sync and hasn't processed yet timeout check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); @@ -8177,7 +8153,7 @@ fn test_concurrent_monitor_claim() { let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1; + &mut io::Cursor::new(&w.0), nodes[0].keys_manager).unwrap().1; assert!(new_monitor == *monitor); let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed); @@ -8206,7 +8182,7 @@ fn test_concurrent_monitor_claim() { let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1; + &mut io::Cursor::new(&w.0), nodes[0].keys_manager).unwrap().1; assert!(new_monitor == *monitor); let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed); @@ -8225,14 +8201,17 @@ fn test_concurrent_monitor_claim() { let 
updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { + { + let mut node_0_per_peer_lock; + let mut node_0_peer_state_lock; + let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2); if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure); assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::Completed); assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } - } else { assert!(false); }; + } // Our local monitor is in-sync and hasn't processed yet timeout check_added_monitors!(nodes[0], 1); @@ -8334,10 +8313,11 @@ fn test_htlc_no_detection() { let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn[1].input.len(), 1); - assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - check_spends!(node_txn[1], local_txn[0]); - node_txn[1].clone() + assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[0], local_txn[0]); + node_txn[0].clone() }; let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; @@ -8403,14 +8383,11 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain }; let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}); - let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); } - assert_eq!(bob_txn.len(), 1); - check_spends!(bob_txn[0], chan_ab.3); } // Step (5): @@ -8494,13 +8471,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain } let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); if broadcast_alice { - // In `connect_block()`, the ChainMonitor and ChannelManager are separately notified about a - // new block being connected. The ChannelManager being notified triggers a monitor update, - // which triggers broadcasting our commitment tx and an HTLC-claiming tx. The ChainMonitor - // being notified triggers the HTLC-claiming tx redundantly, resulting in 3 total txs being - // broadcasted. 
- assert_eq!(bob_txn.len(), 3); - check_spends!(bob_txn[1], chan_ab.3); + assert_eq!(bob_txn.len(), 1); + check_spends!(bob_txn[0], txn_to_broadcast[0]); } else { assert_eq!(bob_txn.len(), 2); check_spends!(bob_txn[0], chan_ab.3); @@ -8511,23 +8483,20 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the // broadcasted commitment transaction. { - let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - if go_onchain_before_fulfill { - // Bob should now have an extra broadcasted tx, for the preimage-claiming transaction. - assert_eq!(bob_txn.len(), 2); - } let script_weight = match broadcast_alice { true => OFFERED_HTLC_SCRIPT_WEIGHT, false => ACCEPTED_HTLC_SCRIPT_WEIGHT }; - // If Alice force-closed and Bob didn't receive her commitment transaction until after he - // received Carol's fulfill, he broadcasts the HTLC-output-claiming transaction first. Else if - // Bob force closed or if he found out about Alice's commitment tx before receiving Carol's - // fulfill, then he broadcasts the HTLC-output-claiming transaction second. - if broadcast_alice && !go_onchain_before_fulfill { + // If Alice force-closed, Bob only broadcasts an HTLC-output-claiming transaction. Otherwise, + // Bob force-closed and broadcasts the commitment transaction along with an + // HTLC-output-claiming transaction. + let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + if broadcast_alice { + assert_eq!(bob_txn.len(), 1); check_spends!(bob_txn[0], txn_to_broadcast[0]); assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight); } else { + assert_eq!(bob_txn.len(), 2); check_spends!(bob_txn[1], txn_to_broadcast[0]); assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight); } @@ -8542,6 +8511,56 @@ fn test_onchain_htlc_settlement_after_close() { do_test_onchain_htlc_settlement_after_close(false, false); } +#[test] +fn test_duplicate_temporary_channel_id_from_different_peers() { + // Tests that we can accept two different `OpenChannel` requests with the same + // `temporary_channel_id`, as long as they are from different peers. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // Create a first channel + nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + + // Create a second channel + nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None).unwrap(); + let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + + // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same + // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0]. + open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id; + + // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same + // `temporary_channel_id` as they are from different peers.
+ nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &open_chan_msg_chan_1_0); + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::SendAcceptChannel { node_id, msg } => { + assert_eq!(node_id, &nodes[1].node.get_our_node_id()); + assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id); + }, + _ => panic!("Unexpected event"), + } + } + + nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), channelmanager::provided_init_features(), &open_chan_msg_chan_2_0); + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::SendAcceptChannel { node_id, msg } => { + assert_eq!(node_id, &nodes[2].node.get_our_node_id()); + assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id); + }, + _ => panic!("Unexpected event"), + } + } +} + #[test] fn test_duplicate_chan_id() { // Test that if a given peer tries to open a channel with the same channel_id as one that is @@ -8630,12 +8649,13 @@ fn test_duplicate_chan_id() { create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event let funding_created = { - let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). - let mut as_chan = a_channel_lock.by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); + let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); let logger = test_utils::TestLogger::new(); as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap() }; @@ -8941,7 +8961,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); } expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 10_000); + expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000); { // Note that we use a different PaymentId here to allow us to duplicativly pay @@ -9003,7 +9023,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { #[test] fn test_dup_htlc_second_fail_panic() { // Previously, if we received two HTLCs back-to-back, where the second overran the expected - // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event. + // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event. // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed. 
do_test_dup_htlc_second_rejected(true); @@ -9127,7 +9147,7 @@ fn test_keysend_payments_to_public_node() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - let network_graph = nodes[0].network_graph; + let network_graph = nodes[0].network_graph.clone(); let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); let route_params = RouteParameters { @@ -9168,7 +9188,7 @@ fn test_keysend_payments_to_private_node() { final_value_msat: 10000, final_cltv_expiry_delta: 40, }; - let network_graph = nodes[0].network_graph; + let network_graph = nodes[0].network_graph.clone(); let first_hops = nodes[0].node.list_usable_channels(); let scorer = test_utils::TestScorer::with_penalty(0); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); @@ -9190,9 +9210,9 @@ fn test_keysend_payments_to_private_node() { #[test] fn test_double_partial_claim() { - // Test what happens if a node receives a payment, generates a PaymentReceived event, the HTLCs + // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs // time out, the sender resends only some of the MPP parts, then the user processes the - // PaymentReceived event, ensuring they don't inadvertently claim only part of the full payment + // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment // amount. let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); @@ -9213,7 +9233,7 @@ fn test_double_partial_claim() { }); send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret); - // nodes[3] has now received a PaymentReceived event...which it will take some (exorbitant) + // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant) // amount of time to respond to. // Connect some blocks to time out the payment @@ -9237,7 +9257,7 @@ fn test_double_partial_claim() { pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); // At this point nodes[3] has received one half of the payment, and the user goes to handle - // that PaymentReceived event they got hours ago and never handled...we should refuse to claim. + // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim. 
nodes[3].node.claim_funds(payment_preimage); check_added_monitors!(nodes[3], 0); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); @@ -9290,9 +9310,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); if on_holder_tx { - if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) { - chan.holder_dust_limit_satoshis = 546; - } + let mut node_0_per_peer_lock; + let mut node_0_peer_state_lock; + let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); + chan.holder_dust_limit_satoshis = 546; } nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); @@ -9307,8 +9328,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); let dust_buffer_feerate = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan = chan_lock.channel_by_id.get(&channel_id).unwrap(); chan.get_dust_buffer_feerate(None) as u64 }; let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;