From: Viktor Tigerström <11711198+ViktorTigerstrom@users.noreply.github.com>
Date: Tue, 3 Jan 2023 09:52:10 +0000 (+0100)
Subject: Expect `pending_msg_events` to be in random peer order in tests
X-Git-Tag: v0.0.114-beta~67^2~1
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=cb952f651ffbe79337f857940723ae5b2fcbc408;p=rust-lightning

Expect `pending_msg_events` to be in random peer order in tests
---

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index eb634229d..b76da83c0 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -935,7 +935,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
 	// Note that the ordering of the events for different nodes is non-prescriptive, though the
 	// ordering of the two events that both go to nodes[2] have to stay in the same order.
-	let messages_a = match events_3.pop().unwrap() {
+	let (nodes_0_event, events_3) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events_3);
+	let messages_a = match nodes_0_event {
 		MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
 			assert_eq!(node_id, nodes[0].node.get_our_node_id());
 			assert!(updates.update_fulfill_htlcs.is_empty());
@@ -947,8 +948,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		},
 		_ => panic!("Unexpected event type!"),
 	};
+
+	let (nodes_2_event, events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+	let send_event_b = SendEvent::from_event(nodes_2_event);
+	assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
+
 	let raa = if test_ignore_second_cs {
-		match events_3.remove(1) {
+		let (nodes_2_event, _events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+		match nodes_2_event {
 			MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
 				assert_eq!(node_id, nodes[2].node.get_our_node_id());
 				Some(msg.clone())
 			},
@@ -956,8 +963,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 			_ => panic!("Unexpected event"),
 		}
 	} else { None };
-	let send_event_b = SendEvent::from_event(events_3.remove(0));
-	assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
 
 	// Now deliver the new messages...
 
@@ -991,6 +996,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	check_added_monitors!(nodes[2], 1);
 
 	let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
+	// As both messages are for nodes[1], they're in order.
 	assert_eq!(bs_revoke_and_commit.len(), 2);
 	match bs_revoke_and_commit[0] {
 		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index e813d012f..49127ba18 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -558,6 +558,84 @@ macro_rules! get_htlc_update_msgs {
 	}
 }
 
+/// Fetches the first `msg_event` to the passed `node_id` in the passed `msg_events` vec.
+/// Returns the `msg_event`, along with an updated `msg_events` vec with the message removed.
+///
+/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
+/// `msg_events` are stored under specific peers, this function does not fetch such `msg_events` as
+/// such messages are intended for all peers.
+pub fn remove_first_msg_event_to_node(msg_node_id: &PublicKey, msg_events: &Vec<MessageSendEvent>) -> (MessageSendEvent, Vec<MessageSendEvent>) {
+	let ev_index = msg_events.iter().position(|e| { match e {
+		MessageSendEvent::SendAcceptChannel { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendOpenChannel { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendFundingCreated { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendFundingSigned { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendChannelReady { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendAnnouncementSignatures { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::UpdateHTLCs { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendClosingSigned { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendShutdown { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendChannelReestablish { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendChannelAnnouncement { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::BroadcastChannelAnnouncement { .. } => {
+			false
+		},
+		MessageSendEvent::BroadcastChannelUpdate { .. } => {
+			false
+		},
+		MessageSendEvent::SendChannelUpdate { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::HandleError { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendChannelRangeQuery { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendShortIdsQuery { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendReplyChannelRange { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendGossipTimestampFilter { node_id, .. } => {
+			node_id == msg_node_id
+		},
+	}});
+	if ev_index.is_some() {
+		let mut updated_msg_events = msg_events.to_vec();
+		let ev = updated_msg_events.remove(ev_index.unwrap());
+		(ev, updated_msg_events)
+	} else {
+		panic!("Couldn't find any MessageSendEvent to the node!")
+	}
+}
+
 #[cfg(test)]
 macro_rules! get_channel_ref {
 	($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
@@ -1276,13 +1354,14 @@ macro_rules! commitment_signed_dance {
 			let (bs_revoke_and_ack, extra_msg_option) = {
 				let events = $node_b.node.get_and_clear_pending_msg_events();
 				assert!(events.len() <= 2);
-				(match events[0] {
+				let (node_a_event, events) = remove_first_msg_event_to_node(&$node_a.node.get_our_node_id(), &events);
+				(match node_a_event {
 					MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
 						assert_eq!(*node_id, $node_a.node.get_our_node_id());
 						(*msg).clone()
 					},
 					_ => panic!("Unexpected event"),
-				}, events.get(1).map(|e| e.clone()))
+				}, events.get(0).map(|e| e.clone()))
 			};
 			check_added_monitors!($node_b, 1);
 			if $fail_backwards {
@@ -1833,7 +1912,9 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path
 pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) {
 	let mut events = origin_node.node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), expected_route.len());
-	for (path_idx, (ev, expected_path)) in events.drain(..).zip(expected_route.iter()).enumerate() {
+	for (path_idx, expected_path) in expected_route.iter().enumerate() {
+		let (ev, updated_events) = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &events);
+		events = updated_events;
 		// Once we've gotten through all the HTLCs, the last one should result in a
 		// PaymentClaimable (but each previous one should not!), .
 		let expect_payment = path_idx == expected_route.len() - 1;
@@ -1884,10 +1965,18 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>,
 		}
 	}
 	let mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
-	let events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
+	let mut events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), expected_paths.len());
-	for ev in events.iter() {
-		per_path_msgs.push(msgs_from_ev!(ev));
+
+	if events.len() == 1 {
+		per_path_msgs.push(msgs_from_ev!(&events[0]));
+	} else {
+		for expected_path in expected_paths.iter() {
+			// For MPP payments, we always want the message to the first node in the path.
+			let (ev, updated_events) = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &events);
+			per_path_msgs.push(msgs_from_ev!(&ev));
+			events = updated_events;
+		}
 	}
 
 	for (expected_route, (path_msgs, next_hop)) in expected_paths.iter().zip(per_path_msgs.drain(..)) {
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index c3174b393..c544c3680 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2756,16 +2756,16 @@ fn test_htlc_on_chain_success() {
 		added_monitors.clear();
 	}
 	assert_eq!(events.len(), 3);
-	match events[0] {
-		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-		_ => panic!("Unexpected event"),
-	}
-	match events[1] {
+
+	let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+	let (nodes_0_event, events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events);
+
+	match nodes_2_event {
 		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
 		_ => panic!("Unexpected event"),
 	}
-	match events[2] {
+	match nodes_0_event {
 		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 			assert!(update_add_htlcs.is_empty());
 			assert!(update_fail_htlcs.is_empty());
@@ -2775,6 +2775,13 @@ fn test_htlc_on_chain_success() {
 		},
 		_ => panic!("Unexpected event"),
 	};
+
+	// Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
+	match events[0] {
+		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+
 	macro_rules! check_tx_local_broadcast {
 		($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
 			let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -3192,21 +3199,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	nodes[1].node.process_pending_htlc_forwards();
 	check_added_monitors!(nodes[1], 1);
 
-	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
-	match events[if deliver_bs_raa { 1 } else { 0 }] {
-		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
-		_ => panic!("Unexpected event"),
-	}
-	match events[if deliver_bs_raa { 2 } else { 1 }] {
-		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
-			assert_eq!(channel_id, chan_2.2);
-			assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
-		},
-		_ => panic!("Unexpected event"),
-	}
-	if deliver_bs_raa {
-		match events[0] {
+
+	let events = if deliver_bs_raa {
+		let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+		match nodes_2_event {
 			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 				assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
 				assert_eq!(update_add_htlcs.len(), 1);
@@ -3216,8 +3214,20 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 			},
 			_ => panic!("Unexpected event"),
 		}
+		events
+	} else { events };
+
+	let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+	match nodes_2_event {
+		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
+			assert_eq!(channel_id, chan_2.2);
+			assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
+		},
+		_ => panic!("Unexpected event"),
 	}
-	match events[if deliver_bs_raa { 3 } else { 2 }] {
+
+	let (nodes_0_event, events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events);
+	match nodes_0_event {
 		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
 			assert!(update_add_htlcs.is_empty());
 			assert_eq!(update_fail_htlcs.len(), 3);
@@ -3262,6 +3272,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 		_ => panic!("Unexpected event"),
 	}
 
+	// Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
+	match events[0] {
+		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
+		_ => panic!("Unexpected event"),
+	}
+
 	assert!(failed_htlcs.contains(&first_payment_hash.0));
 	assert!(failed_htlcs.contains(&second_payment_hash.0));
 	assert!(failed_htlcs.contains(&third_payment_hash.0));
@@ -4627,15 +4643,15 @@ fn test_onchain_to_onchain_claim() {
 	check_added_monitors!(nodes[1], 1);
 	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 3);
-	match msg_events[0] {
-		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-		_ => panic!("Unexpected event"),
-	}
-	match msg_events[1] {
+	let (nodes_2_event, msg_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &msg_events);
+	let (nodes_0_event, msg_events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &msg_events);
+
+	match nodes_2_event {
 		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
 		_ => panic!("Unexpected event"),
 	}
-	match msg_events[2] {
+
+	match nodes_0_event {
 		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 			assert!(update_add_htlcs.is_empty());
 			assert!(update_fail_htlcs.is_empty());
@@ -4645,6 +4661,13 @@ fn test_onchain_to_onchain_claim() {
 		},
 		_ => panic!("Unexpected event"),
 	};
+
+	// Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
+	match msg_events[0] {
+		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+
 	// Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
 	let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
 	mine_transaction(&nodes[1], &commitment_tx[0]);
@@ -9254,7 +9277,8 @@ fn test_double_partial_claim() {
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
-	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+	let (node_1_msgs, _events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
 
 	// At this point nodes[3] has received one half of the payment, and the user goes to handle
 	// that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index af31b4c01..34f71047d 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -146,11 +146,11 @@ fn mpp_retry() {
 	assert_eq!(events.len(), 2);
 
 	// Pass half of the payment along the success path.
-	let success_path_msgs = events.remove(0);
+	let (success_path_msgs, mut events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
 	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);
 
 	// Add the HTLC along the first hop.
-	let fail_path_msgs_1 = events.remove(0);
+	let (fail_path_msgs_1, _events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
 	let (update_add, commitment_signed) = match fail_path_msgs_1 {
 		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 			assert_eq!(update_add_htlcs.len(), 1);
@@ -230,7 +230,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
 	assert_eq!(events.len(), 2);
 
 	// Pass half of the payment along the first path.
-	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);
+	let (node_1_msgs, mut events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
 
 	if send_partial_mpp {
 		// Time out the partial MPP
@@ -257,7 +258,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
 		expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
 	} else {
 		// Pass half of the payment along the second path.
-		pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
+		let (node_2_msgs, _events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+		pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None);
 
 		// Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
 		for _ in 0..MPP_TIMEOUT_TICKS {
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 50aa687d8..486a5ee2d 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -699,8 +699,10 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
 	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_events.len(), 2);
-	do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
-	do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+	let (node_1_msgs, mut send_events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &send_events);
+	let (node_2_msgs, _send_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &send_events);
+	do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None);
+	do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None);
 
 	// Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
 	// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
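
For readers following along outside the tree, the pattern this commit leans on can be sketched in isolation: `remove_first_msg_event_to_node` selects a peer's message event by `node_id` rather than by its position in the vec, so the tests keep passing once `pending_msg_events` comes back in random peer order. The self-contained Rust sketch below is illustrative only; its `PublicKey` and `MessageSendEvent` are simplified stand-ins for the real rust-lightning types, reduced to what the pattern needs:

// Standalone sketch; `PublicKey` and `MessageSendEvent` here are simplified
// stand-ins, not the real rust-lightning types.
#[derive(Clone, Debug, PartialEq, Eq)]
struct PublicKey(u8);

#[derive(Clone, Debug, PartialEq, Eq)]
enum MessageSendEvent {
	// Events addressed to a specific peer carry a `node_id`...
	UpdateHTLCs { node_id: PublicKey },
	SendRevokeAndACK { node_id: PublicKey },
	// ...while broadcast events have no single recipient, so the helper
	// never matches them.
	BroadcastChannelUpdate,
}

impl MessageSendEvent {
	fn node_id(&self) -> Option<&PublicKey> {
		match self {
			MessageSendEvent::UpdateHTLCs { node_id }
			| MessageSendEvent::SendRevokeAndACK { node_id } => Some(node_id),
			MessageSendEvent::BroadcastChannelUpdate => None,
		}
	}
}

/// Removes and returns the first event addressed to `msg_node_id`, plus the
/// remaining events, regardless of where that event sits in the vec.
fn remove_first_msg_event_to_node(
	msg_node_id: &PublicKey, msg_events: &[MessageSendEvent],
) -> (MessageSendEvent, Vec<MessageSendEvent>) {
	let ev_index = msg_events.iter()
		.position(|e| e.node_id() == Some(msg_node_id))
		.expect("Couldn't find any MessageSendEvent to the node!");
	let mut updated_msg_events = msg_events.to_vec();
	let ev = updated_msg_events.remove(ev_index);
	(ev, updated_msg_events)
}

fn main() {
	// Once per-peer state is iterated in random order, these events may
	// arrive in any permutation, so indexing by fixed position would be flaky.
	let events = vec![
		MessageSendEvent::SendRevokeAndACK { node_id: PublicKey(2) },
		MessageSendEvent::UpdateHTLCs { node_id: PublicKey(0) },
		MessageSendEvent::BroadcastChannelUpdate,
	];
	let (ev, rest) = remove_first_msg_event_to_node(&PublicKey(0), &events);
	assert_eq!(ev, MessageSendEvent::UpdateHTLCs { node_id: PublicKey(0) });
	assert_eq!(rest.len(), 2);
}

Returning a fresh `Vec` rather than mutating in place lets call sites shadow their `events` binding step by step, which is how the updated tests above consume the helper.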