Correct lifetimes on `_reload_node`
[rust-lightning] / lightning / src / ln / functional_test_utils.rs
index 220557e4ca3ecfbde2ab1c7dd9a443a6787b8925..a7047f5b8302eb0b978f537a514659b544f21822 100644 (file)
@@ -368,7 +368,7 @@ pub struct NodeCfg<'a> {
        pub override_init_features: Rc<RefCell<Option<InitFeatures>>>,
 }
 
-type TestChannelManager<'a, 'b, 'c> = ChannelManager<&'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'b test_utils::TestKeysInterface, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'b test_utils::TestRouter<'c>, &'c test_utils::TestLogger>;
+type TestChannelManager<'b, 'c> = ChannelManager<&'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'b test_utils::TestKeysInterface, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'b test_utils::TestRouter<'c>, &'c test_utils::TestLogger>;
 
 pub struct Node<'a, 'b: 'a, 'c: 'b> {
        pub chain_source: &'c test_utils::TestChainSource,
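
The dropped `'a` never appears on the right-hand side of the alias. Unlike structs, type aliases accept generic parameters that the definition does not use, so the old form compiled but forced every use site to thread a meaningless lifetime. A minimal standalone sketch of the same pattern (hypothetical names):

    struct Manager<'b>(&'b str);
    type Alias<'a, 'b> = Manager<'b>; // 'a is accepted but never used
    type Fixed<'b> = Manager<'b>;     // use sites now name one fewer lifetime

    fn demo<'b>(s: &'b str) -> Fixed<'b> { Manager(s) }
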
@@ -377,7 +377,7 @@ pub struct Node<'a, 'b: 'a, 'c: 'b> {
        pub router: &'b test_utils::TestRouter<'c>,
        pub chain_monitor: &'b test_utils::TestChainMonitor<'c>,
        pub keys_manager: &'b test_utils::TestKeysInterface,
-       pub node: &'a TestChannelManager<'a, 'b, 'c>,
+       pub node: &'a TestChannelManager<'b, 'c>,
        pub network_graph: &'a NetworkGraph<&'c test_utils::TestLogger>,
        pub gossip_sync: P2PGossipSync<&'b NetworkGraph<&'c test_utils::TestLogger>, &'c test_utils::TestChainSource, &'c test_utils::TestLogger>,
        pub node_seed: [u8; 32],
@@ -448,8 +448,8 @@ impl<H: NodeHolder> NodeHolder for &H {
        fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { (*self).chain_monitor() }
 }
 impl<'a, 'b: 'a, 'c: 'b> NodeHolder for Node<'a, 'b, 'c> {
-       type CM = TestChannelManager<'a, 'b, 'c>;
-       fn node(&self) -> &TestChannelManager<'a, 'b, 'c> { &self.node }
+       type CM = TestChannelManager<'b, 'c>;
+       fn node(&self) -> &TestChannelManager<'b, 'c> { &self.node }
        fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { Some(self.chain_monitor) }
 }
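
The `NodeHolder` impl only drops the dead parameter along with the alias. For context, the trait (with its blanket `impl for &H` above) lets helpers be written once over anything holding a node; a hypothetical helper in that style, with the `added_monitors` field assumed from `test_utils`:

    // Hypothetical sketch: generic over NodeHolder, so Node and &Node both work.
    fn assert_no_pending_monitor_adds<H: NodeHolder>(node: &H) {
        if let Some(chain_monitor) = node.chain_monitor() {
            assert!(chain_monitor.added_monitors.lock().unwrap().is_empty());
        }
    }
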
 
@@ -924,7 +924,7 @@ macro_rules! check_added_monitors {
        }
 }
 
-pub fn _reload_node<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, default_config: UserConfig, chanman_encoded: &[u8], monitors_encoded: &[&[u8]]) -> ChannelManager<&'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'b test_utils::TestKeysInterface, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'b test_utils::TestRouter<'c>, &'c test_utils::TestLogger> {
+pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: UserConfig, chanman_encoded: &[u8], monitors_encoded: &[&[u8]]) -> TestChannelManager<'b, 'c> {
        let mut monitors_read = Vec::with_capacity(monitors_encoded.len());
        for encoded in monitors_encoded {
                let mut monitor_read = &encoded[..];
@@ -940,7 +940,7 @@ pub fn _reload_node<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, default_config:
                for monitor in monitors_read.iter_mut() {
                        assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
                }
-               <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut node_read, ChannelManagerReadArgs {
+               <(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs {
                        default_config,
                        entropy_source: node.keys_manager,
                        node_signer: node.keys_manager,
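
With the alias reused in both the signature and the deserialization below, the manager type is spelled once. A hedged sketch of a direct call, assuming `chan_id` and the usual test scaffolding from earlier setup (tests normally reach this through the `reload_node!` macro):

    // Sketch: serialize the running manager plus a monitor, then rebuild both.
    let chanman_encoded = nodes[0].node.encode();
    let monitor_encoded = get_monitor!(nodes[0], chan_id).encode();
    let new_manager = _reload_node(&nodes[0], test_default_channel_config(),
        &chanman_encoded, &[&monitor_encoded]);
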
@@ -1418,14 +1418,18 @@ macro_rules! check_closed_broadcast {
 }
 
 /// Check that a channel's closing events have been issued
-pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: ClosureReason, is_check_discard_funding: bool) {
+pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: ClosureReason, is_check_discard_funding: bool,
+       expected_counterparty_node_ids: &[PublicKey], expected_channel_capacity: u64) {
        let events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), events_count, "{:?}", events);
        let mut issues_discard_funding = false;
-       for event in events {
+       for (idx, event) in events.into_iter().enumerate() {
                match event {
-                       Event::ChannelClosed { ref reason, .. } => {
+                       Event::ChannelClosed { ref reason, counterparty_node_id,
+                               channel_capacity_sats, .. } => {
                                assert_eq!(*reason, expected_reason);
+                               assert_eq!(counterparty_node_id.unwrap(), expected_counterparty_node_ids[idx]);
+                               assert_eq!(channel_capacity_sats.unwrap(), expected_channel_capacity);
                        },
                        Event::DiscardFunding { .. } => {
                                issues_discard_funding = true;
@@ -1441,11 +1445,12 @@ pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: Clo
 /// Don't use this, use the identically-named function instead.
 #[macro_export]
 macro_rules! check_closed_event {
-       ($node: expr, $events: expr, $reason: expr) => {
-               check_closed_event!($node, $events, $reason, false);
+       ($node: expr, $events: expr, $reason: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => {
+               check_closed_event!($node, $events, $reason, false, $counterparty_node_ids, $channel_capacity);
        };
-       ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr) => {
-               $crate::ln::functional_test_utils::check_closed_event(&$node, $events, $reason, $is_check_discard_funding);
+       ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => {
+               $crate::ln::functional_test_utils::check_closed_event(&$node, $events, $reason,
+                       $is_check_discard_funding, &$counterparty_node_ids, $channel_capacity);
        }
 }
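
Events are matched against the node-id slice by index, so callers list one counterparty per expected `ChannelClosed` event. A typical invocation after a cooperative close of a 100_000 sat channel would look like:

    check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure,
        [nodes[1].node.get_our_node_id()], 100000);
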
 
@@ -2250,7 +2255,10 @@ pub fn do_claim_payment_along_route_with_extra_penultimate_hop_fees<'a, 'b, 'c>(
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage);
+       pass_claimed_payment_along_route(origin_node, expected_paths, expected_extra_fees, skip_last, our_payment_preimage)
+}
 
+pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees: &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage) -> u64 {
        let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
        assert_eq!(claim_event.len(), 1);
        match claim_event[0] {
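
Splitting the tail out as `pass_claimed_payment_along_route` lets a test interleave its own steps between the claim and the settlement. A sketch, assuming a single-hop route and a `payment_preimage` from earlier setup:

    // Sketch: claim on the final hop, run extra test steps, then settle.
    nodes[1].node.claim_funds(payment_preimage);
    // ... e.g. exercise a disconnect/reconnect here ...
    pass_claimed_payment_along_route(&nodes[0], &[&[&nodes[1]]], &[0], false, payment_preimage);
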
@@ -2861,13 +2869,6 @@ macro_rules! get_chan_reestablish_msgs {
                                        panic!("Unexpected event")
                                }
                        }
-                       for chan in $src_node.node.list_channels() {
-                               if chan.is_public && chan.counterparty.node_id != $dst_node.node.get_our_node_id() {
-                                       if let Some(scid) = chan.short_channel_id {
-                                               assert!(announcements.remove(&scid));
-                                       }
-                               }
-                       }
                        assert!(announcements.is_empty());
                        res
                }
@@ -2952,9 +2953,41 @@ macro_rules! handle_chan_reestablish_msgs {
        }
 }
 
+pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
+       pub node_a: &'a Node<'b, 'c, 'd>,
+       pub node_b: &'a Node<'b, 'c, 'd>,
+       pub send_channel_ready: (bool, bool),
+       pub pending_htlc_adds: (i64, i64),
+       pub pending_htlc_claims: (usize, usize),
+       pub pending_htlc_fails: (usize, usize),
+       pub pending_cell_htlc_claims: (usize, usize),
+       pub pending_cell_htlc_fails: (usize, usize),
+       pub pending_raa: (bool, bool),
+}
+
+impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
+       pub fn new(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> Self {
+               Self {
+                       node_a,
+                       node_b,
+                       send_channel_ready: (false, false),
+                       pending_htlc_adds: (0, 0),
+                       pending_htlc_claims: (0, 0),
+                       pending_htlc_fails: (0, 0),
+                       pending_cell_htlc_claims: (0, 0),
+                       pending_cell_htlc_fails: (0, 0),
+                       pending_raa: (false, false),
+               }
+       }
+}
+
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
-pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
+pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
+       let ReconnectArgs {
+               node_a, node_b, send_channel_ready, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails,
+               pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa
+       } = args;
        node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
                features: node_b.node.init_features(), networks: None, remote_network_address: None
        }, true).unwrap();
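
The struct keeps the nine knobs out of every call site: build the defaults with `ReconnectArgs::new`, override only what the test cares about, and pass it by value. A hypothetical call site:

    // Sketch: reconnect expecting one pending RAA from node_a, none from node_b.
    let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
    reconnect_args.pending_raa = (true, false);
    reconnect_nodes(reconnect_args);
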