Generalize next_hop_packet_pubkey onion util
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 84bc1a1b3f0656476aa1d00771f0368d97003cd0..be18a830beb6d848af4345bced13ddbdeccaedc5 100644
@@ -1418,14 +1418,18 @@ macro_rules! check_closed_broadcast {
 }
 
 /// Check that a channel's closing channel events have been issued
-pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: ClosureReason, is_check_discard_funding: bool) {
+pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: ClosureReason, is_check_discard_funding: bool,
+       expected_counterparty_node_ids: &[PublicKey], expected_channel_capacity: u64) {
        let events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), events_count, "{:?}", events);
        let mut issues_discard_funding = false;
-       for event in events {
+       for (idx, event) in events.into_iter().enumerate() {
                match event {
-                       Event::ChannelClosed { ref reason, .. } => {
+                       Event::ChannelClosed { ref reason, counterparty_node_id,
+                               channel_capacity_sats, .. } => {
                                assert_eq!(*reason, expected_reason);
+                               assert_eq!(counterparty_node_id.unwrap(), expected_counterparty_node_ids[idx]);
+                               assert_eq!(channel_capacity_sats.unwrap(), expected_channel_capacity);
                        },
                        Event::DiscardFunding { .. } => {
                                issues_discard_funding = true;
@@ -1441,11 +1445,12 @@ pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: Clo
 /// Don't use this, use the identically-named function instead.
 #[macro_export]
 macro_rules! check_closed_event {
-       ($node: expr, $events: expr, $reason: expr) => {
-               check_closed_event!($node, $events, $reason, false);
+       ($node: expr, $events: expr, $reason: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => {
+               check_closed_event!($node, $events, $reason, false, $counterparty_node_ids, $channel_capacity);
        };
-       ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr) => {
-               $crate::ln::functional_test_utils::check_closed_event(&$node, $events, $reason, $is_check_discard_funding);
+       ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => {
+               $crate::ln::functional_test_utils::check_closed_event(&$node, $events, $reason,
+                       $is_check_discard_funding, &$counterparty_node_ids, $channel_capacity);
        }
 }
 
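Since the three-argument macro form is removed, every existing `check_closed_event!` call site must now supply the expected counterparty node ids and channel capacity. A hedged sketch of an updated invocation, again assuming the `nodes` fixture:

    // Five-argument macro form; is_check_discard_funding defaults to false.
    check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure,
        [nodes[1].node.get_our_node_id()], 100000);
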
@@ -2250,7 +2255,10 @@ pub fn do_claim_payment_along_route_with_extra_penultimate_hop_fees<'a, 'b, 'c>(
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage);
+       pass_claimed_payment_along_route(origin_node, expected_paths, expected_extra_fees, skip_last, our_payment_preimage)
+}
 
+pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees: &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage) -> u64 {
        let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
        assert_eq!(claim_event.len(), 1);
        match claim_event[0] {
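
Pulling the tail of `do_claim_payment_along_route_with_extra_penultimate_hop_fees` out into `pass_claimed_payment_along_route` exposes a seam between claiming the preimage and driving the resulting HTLC claim messages. A minimal sketch of what that enables, assuming a two-node path, the usual `nodes` fixture, and a `payment_preimage` obtained from `route_payment` (all assumed names, not part of this patch):

    // Claim on the final hop ourselves...
    nodes[1].node.claim_funds(payment_preimage);
    // ...insert any custom assertions here, then drive the claim messages
    // back along the path; the returned value is the fee paid, in msat.
    let _fee_msat = pass_claimed_payment_along_route(
        &nodes[0], &[&[&nodes[1]]], &[0], false, payment_preimage);
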
@@ -2945,9 +2953,41 @@ macro_rules! handle_chan_reestablish_msgs {
        }
 }
 
+pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
+       pub node_a: &'a Node<'b, 'c, 'd>,
+       pub node_b: &'a Node<'b, 'c, 'd>,
+       pub send_channel_ready: (bool, bool),
+       pub pending_htlc_adds: (i64, i64),
+       pub pending_htlc_claims: (usize, usize),
+       pub pending_htlc_fails: (usize, usize),
+       pub pending_cell_htlc_claims: (usize, usize),
+       pub pending_cell_htlc_fails: (usize, usize),
+       pub pending_raa: (bool, bool),
+}
+
+impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
+       pub fn new(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> Self {
+               Self {
+                       node_a,
+                       node_b,
+                       send_channel_ready: (false, false),
+                       pending_htlc_adds: (0, 0),
+                       pending_htlc_claims: (0, 0),
+                       pending_htlc_fails: (0, 0),
+                       pending_cell_htlc_claims: (0, 0),
+                       pending_cell_htlc_fails: (0, 0),
+                       pending_raa: (false, false),
+               }
+       }
+}
+
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
-pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
+pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
+       let ReconnectArgs {
+               node_a, node_b, send_channel_ready, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails,
+               pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa
+       } = args;
        node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
                features: node_b.node.init_features(), networks: None, remote_network_address: None
        }, true).unwrap();
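
With the struct in place, call sites shrink from nine positional arguments to a builder-style setup; only fields that differ from `ReconnectArgs::new`'s defaults need to be set. A sketch of the expected pattern, assuming the usual `nodes` fixture:

    // Reconnect nodes[0] and nodes[1], expecting channel_ready to be
    // re-exchanged in both directions; all other fields keep their defaults.
    let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
    reconnect_args.send_channel_ready = (true, true);
    reconnect_nodes(reconnect_args);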