// Note that the ordering of events for different nodes is non-prescriptive, though the
// two events that both go to nodes[2] must stay in their relative order.
- let messages_a = match events_3.pop().unwrap() {
+ let (nodes_0_event, events_3) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events_3);
+ let messages_a = match nodes_0_event {
MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
assert_eq!(node_id, nodes[0].node.get_our_node_id());
assert!(updates.update_fulfill_htlcs.is_empty());
},
_ => panic!("Unexpected event type!"),
};
+
+ let (nodes_2_event, events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+ let send_event_b = SendEvent::from_event(nodes_2_event);
+ assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
+
let raa = if test_ignore_second_cs {
- match events_3.remove(1) {
+ let (nodes_2_event, _events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+ match nodes_2_event {
MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
assert_eq!(node_id, nodes[2].node.get_our_node_id());
				Some(msg.clone())
			},
			_ => panic!("Unexpected event"),
}
} else { None };
- let send_event_b = SendEvent::from_event(events_3.remove(0));
- assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
// Now deliver the new messages...
check_added_monitors!(nodes[2], 1);
let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
+ // As both messages are for nodes[1], they're in order.
assert_eq!(bs_revoke_and_commit.len(), 2);
match bs_revoke_and_commit[0] {
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
}
}
+/// Fetches the first `msg_event` to the passed `node_id` in the passed `msg_events` vec.
+/// Returns the `msg_event`, along with an updated `msg_events` vec with the message removed.
+///
+/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
+/// `msg_events` are stored under a specific peer, this function does not fetch them, as
+/// such messages are intended for all peers.
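+///
+/// A minimal usage sketch, mirroring the call sites below (the node setup is assumed):
+///
+/// ```ignore
+/// let events = nodes[1].node.get_and_clear_pending_msg_events();
+/// // Pull out the first message destined for nodes[0]; `events` is returned without it.
+/// let (ev, events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events);
+/// ```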
+pub fn remove_first_msg_event_to_node(msg_node_id: &PublicKey, msg_events: &Vec<MessageSendEvent>) -> (MessageSendEvent, Vec<MessageSendEvent>) {
+	let ev_index = msg_events.iter().position(|e| { match e {
+		// Broadcast gossip messages are stored under a specific peer but are intended for all
+		// peers (see the note above), so they never match a particular `msg_node_id`.
+		MessageSendEvent::BroadcastChannelAnnouncement { .. } |
+		MessageSendEvent::BroadcastChannelUpdate { .. } => false,
+		// All other `MessageSendEvent` variants carry the destination peer's `node_id`.
+		MessageSendEvent::SendAcceptChannel { node_id, .. } |
+		MessageSendEvent::SendOpenChannel { node_id, .. } |
+		MessageSendEvent::SendFundingCreated { node_id, .. } |
+		MessageSendEvent::SendFundingSigned { node_id, .. } |
+		MessageSendEvent::SendChannelReady { node_id, .. } |
+		MessageSendEvent::SendAnnouncementSignatures { node_id, .. } |
+		MessageSendEvent::UpdateHTLCs { node_id, .. } |
+		MessageSendEvent::SendRevokeAndACK { node_id, .. } |
+		MessageSendEvent::SendClosingSigned { node_id, .. } |
+		MessageSendEvent::SendShutdown { node_id, .. } |
+		MessageSendEvent::SendChannelReestablish { node_id, .. } |
+		MessageSendEvent::SendChannelAnnouncement { node_id, .. } |
+		MessageSendEvent::SendChannelUpdate { node_id, .. } |
+		MessageSendEvent::HandleError { node_id, .. } |
+		MessageSendEvent::SendChannelRangeQuery { node_id, .. } |
+		MessageSendEvent::SendShortIdsQuery { node_id, .. } |
+		MessageSendEvent::SendReplyChannelRange { node_id, .. } |
+		MessageSendEvent::SendGossipTimestampFilter { node_id, .. } => node_id == msg_node_id,
+	}});
+	if let Some(ev_index) = ev_index {
+		let mut updated_msg_events = msg_events.to_vec();
+		let ev = updated_msg_events.remove(ev_index);
+		(ev, updated_msg_events)
+	} else {
+		panic!("Couldn't find any MessageSendEvent for the node!")
+	}
+}
+
#[cfg(test)]
macro_rules! get_channel_ref {
($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
let (bs_revoke_and_ack, extra_msg_option) = {
let events = $node_b.node.get_and_clear_pending_msg_events();
assert!(events.len() <= 2);
- (match events[0] {
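+		// Pull node_a's revoke_and_ack out explicitly; at most one message may then remain in
+		// `events`, and it is returned as the extra message below.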
+ let (node_a_event, events) = remove_first_msg_event_to_node(&$node_a.node.get_our_node_id(), &events);
+ (match node_a_event {
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
assert_eq!(*node_id, $node_a.node.get_our_node_id());
(*msg).clone()
},
_ => panic!("Unexpected event"),
- }, events.get(1).map(|e| e.clone()))
+ }, events.get(0).map(|e| e.clone()))
};
check_added_monitors!($node_b, 1);
if $fail_backwards {
pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) {
let mut events = origin_node.node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), expected_route.len());
- for (path_idx, (ev, expected_path)) in events.drain(..).zip(expected_route.iter()).enumerate() {
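+	// Match each event to its path via the first hop's node_id, as the event order is not
+	// guaranteed to follow the order of `expected_route`.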
+ for (path_idx, expected_path) in expected_route.iter().enumerate() {
+ let (ev, updated_events) = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &events);
+ events = updated_events;
// Once we've gotten through all the HTLCs, the last one should result in a
		// PaymentClaimable (but each previous one should not!).
let expect_payment = path_idx == expected_route.len() - 1;
}
}
let mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
- let events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
+ let mut events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), expected_paths.len());
- for ev in events.iter() {
- per_path_msgs.push(msgs_from_ev!(ev));
+
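+	// With a single path the only event is necessarily ours; with multiple (MPP) paths, match
+	// each event to its path via the first hop's node_id.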
+ if events.len() == 1 {
+ per_path_msgs.push(msgs_from_ev!(&events[0]));
+ } else {
+ for expected_path in expected_paths.iter() {
+			// For MPP payments, we always want the message destined for the first node in the path.
+ let (ev, updated_events) = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &events);
+ per_path_msgs.push(msgs_from_ev!(&ev));
+ events = updated_events;
+ }
}
for (expected_route, (path_msgs, next_hop)) in expected_paths.iter().zip(per_path_msgs.drain(..)) {
added_monitors.clear();
}
assert_eq!(events.len(), 3);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
- match events[1] {
+
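+	// Fetch the events for nodes[2] and nodes[0] by destination rather than by index, as their
+	// relative order is not guaranteed.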
+ let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ let (nodes_0_event, events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events);
+
+ match nodes_2_event {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
_ => panic!("Unexpected event"),
}
- match events[2] {
+ match nodes_0_event {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert!(update_add_htlcs.is_empty());
assert!(update_fail_htlcs.is_empty());
},
_ => panic!("Unexpected event"),
};
+
+ // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
macro_rules! check_tx_local_broadcast {
($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
- match events[if deliver_bs_raa { 1 } else { 0 }] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
- _ => panic!("Unexpected event"),
- }
- match events[if deliver_bs_raa { 2 } else { 1 }] {
- MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
- assert_eq!(channel_id, chan_2.2);
- assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
- },
- _ => panic!("Unexpected event"),
- }
- if deliver_bs_raa {
- match events[0] {
+
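+	// If bs_raa was delivered, nodes[1] also forwards an HTLC to nodes[2], so pull that
+	// commitment update out before the failure events below.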
+ let events = if deliver_bs_raa {
+ let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ match nodes_2_event {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
assert_eq!(update_add_htlcs.len(), 1);
},
_ => panic!("Unexpected event"),
}
+ events
+ } else { events };
+
+ let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ match nodes_2_event {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
+ assert_eq!(channel_id, chan_2.2);
+ assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
+ },
+ _ => panic!("Unexpected event"),
}
- match events[if deliver_bs_raa { 3 } else { 2 }] {
+
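+	// All three failed-back HTLCs are batched into a single commitment update for nodes[0].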
+ let (nodes_0_event, events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events);
+ match nodes_0_event {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fail_htlcs.len(), 3);
		},
		_ => panic!("Unexpected event"),
}
+ // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
+ _ => panic!("Unexpected event"),
+ }
+
assert!(failed_htlcs.contains(&first_payment_hash.0));
assert!(failed_htlcs.contains(&second_payment_hash.0));
assert!(failed_htlcs.contains(&third_payment_hash.0));
check_added_monitors!(nodes[1], 1);
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- match msg_events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
- match msg_events[1] {
+ let (nodes_2_event, msg_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &msg_events);
+ let (nodes_0_event, msg_events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &msg_events);
+
+ match nodes_2_event {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
_ => panic!("Unexpected event"),
}
- match msg_events[2] {
+
+ match nodes_0_event {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert!(update_add_htlcs.is_empty());
assert!(update_fail_htlcs.is_empty());
},
_ => panic!("Unexpected event"),
};
+
+ // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
+ match msg_events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
// Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
mine_transaction(&nodes[1], &commitment_tx[0]);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
- pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+ let (node_1_msgs, _events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
// At this point nodes[3] has received one half of the payment, and the user goes to handle
// that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
assert_eq!(events.len(), 2);
// Pass half of the payment along the success path.
- let success_path_msgs = events.remove(0);
+ let (success_path_msgs, mut events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);
// Add the HTLC along the first hop.
- let fail_path_msgs_1 = events.remove(0);
+ let (fail_path_msgs_1, _events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
let (update_add, commitment_signed) = match fail_path_msgs_1 {
MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
assert_eq!(update_add_htlcs.len(), 1);
assert_eq!(events.len(), 2);
// Pass half of the payment along the first path.
- pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);
+ let (node_1_msgs, mut events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
if send_partial_mpp {
// Time out the partial MPP
expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
} else {
// Pass half of the payment along the second path.
- pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
+ let (node_2_msgs, _events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None);
// Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
for _ in 0..MPP_TIMEOUT_TICKS {
// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(send_events.len(), 2);
- do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
- do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+ let (node_1_msgs, mut send_events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &send_events);
+ let (node_2_msgs, _send_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &send_events);
+ do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None);
+ do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None);
// Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
// monitors and ChannelManager, for use later, if we don't want to persist both monitors.