Fix CI to error on doc links to private items
[rust-lightning] lightning/src/ln/functional_test_utils.rs
index ee34b590f47c23e4867b9f8f5b8256629418b1de..54d199a26f83f85cec4567baa1b1fae45d9e6faa 100644
@@ -46,6 +46,7 @@ use core::cell::RefCell;
 use alloc::rc::Rc;
 use sync::{Arc, Mutex};
 use core::mem;
+use core::iter::repeat;
 
 pub const CHAN_CONFIRM_DEPTH: u32 = 10;
 
@@ -352,6 +353,11 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                }
                        }
 
+                       let broadcaster = test_utils::TestBroadcaster {
+                               txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
+                               blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
+                       };
+
                        // Before using all the new monitors to check the watch outpoints, use the full set of
                        // them to ensure we can write and reload our ChannelManager.
                        {
@@ -367,20 +373,13 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                        keys_manager: self.keys_manager,
                                        fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
                                        chain_monitor: self.chain_monitor,
-                                       tx_broadcaster: &test_utils::TestBroadcaster {
-                                               txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
-                                               blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
-                                       },
+                                       tx_broadcaster: &broadcaster,
                                        logger: &self.logger,
                                        channel_monitors,
                                }).unwrap();
                        }
 
                        let persister = test_utils::TestPersister::new();
-                       let broadcaster = test_utils::TestBroadcaster {
-                               txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
-                               blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
-                       };
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
@@ -1187,7 +1186,7 @@ macro_rules! commitment_signed_dance {
                {
                        commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
                        if $fail_backwards {
-                               $crate::expect_pending_htlcs_forwardable!($node_a);
+                               expect_pending_htlcs_forwardable_and_htlc_handling_failed!($node_a, vec![$crate::util::events::HTLCDestination::NextHopChannel{ node_id: Some($node_b.node.get_our_node_id()), channel_id: $commitment_signed.channel_id }]);
                                check_added_monitors!($node_a, 1);
 
                                let channel_state = $node_a.node.channel_state.lock().unwrap();
@@ -1254,55 +1253,33 @@ macro_rules! get_route_and_payment_hash {
        }}
 }
 
-pub struct HTLCHandlingFailedConditions {
-       pub expected_destinations: Vec<HTLCDestination>,
-}
-
-impl HTLCHandlingFailedConditions {
-       pub fn new() -> Self {
-               Self {
-                       expected_destinations: vec![],
-               }
-       }
-
-       pub fn with_reason(mut self, reason: HTLCDestination) -> Self {
-               self.expected_destinations = vec![reason];
-               self
-       }
-
-       pub fn with_reasons(mut self, reasons: Vec<HTLCDestination>) -> Self {
-               self.expected_destinations = reasons;
-               self
-       }
-}
-
 #[macro_export]
 macro_rules! expect_pending_htlcs_forwardable_conditions {
-       ($node: expr, $conditions: expr) => {{
-               let conditions = $conditions;
+       ($node: expr, $expected_failures: expr) => {{
+               let expected_failures = $expected_failures;
                let events = $node.node.get_and_clear_pending_events();
                match events[0] {
                        $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
 
-               let count = conditions.expected_destinations.len() + 1;
+               let count = expected_failures.len() + 1;
                assert_eq!(events.len(), count);
 
-               if conditions.expected_destinations.len() > 0 {
-                       expect_htlc_handling_failed_destinations!(events, conditions.expected_destinations)
+               if expected_failures.len() > 0 {
+                       expect_htlc_handling_failed_destinations!(events, expected_failures)
                }
        }}
 }
 
 #[macro_export]
 macro_rules! expect_htlc_handling_failed_destinations {
-       ($events: expr, $destinations: expr) => {{
+       ($events: expr, $expected_failures: expr) => {{
                for event in $events {
                        match event {
                                $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
                                $crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => {
-                                       assert!($destinations.contains(&failed_next_destination))
+                                       assert!($expected_failures.contains(&failed_next_destination))
                                },
                                _ => panic!("Unexpected destination"),
                        }
@@ -1314,15 +1291,15 @@ macro_rules! expect_htlc_handling_failed_destinations {
 /// Clears (and ignores) a PendingHTLCsForwardable event
 macro_rules! expect_pending_htlcs_forwardable_ignore {
        ($node: expr) => {{
-               expect_pending_htlcs_forwardable_conditions!($node, $crate::ln::functional_test_utils::HTLCHandlingFailedConditions::new());
+               expect_pending_htlcs_forwardable_conditions!($node, vec![]);
        }};
 }
 
 #[macro_export]
 /// Clears (and ignores) PendingHTLCsForwardable and HTLCHandlingFailed events
 macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore {
-       ($node: expr, $conditions: expr) => {{
-               expect_pending_htlcs_forwardable_conditions!($node, $conditions);
+       ($node: expr, $expected_failures: expr) => {{
+               expect_pending_htlcs_forwardable_conditions!($node, $expected_failures);
        }};
 }
 
@@ -1341,8 +1318,8 @@ macro_rules! expect_pending_htlcs_forwardable {
 #[macro_export]
 /// Handles a PendingHTLCsForwardable and HTLCHandlingFailed event
 macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed {
-       ($node: expr, $conditions: expr) => {{
-               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $conditions);
+       ($node: expr, $expected_failures: expr) => {{
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $expected_failures);
                $node.node.process_pending_htlc_forwards();
 
                // Ensure process_pending_htlc_forwards is idempotent.
@@ -1563,7 +1540,7 @@ pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
        let mut events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        let expected_payment_id = match events.pop().unwrap() {
-               Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update,
+               Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update, short_channel_id,
                        #[cfg(test)]
                        error_code,
                        #[cfg(test)]
@@ -1573,6 +1550,9 @@ pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
                        assert!(retry.is_some(), "expected retry.is_some()");
                        assert_eq!(retry.as_ref().unwrap().final_value_msat, path.last().unwrap().fee_msat, "Retry amount should match last hop in path");
                        assert_eq!(retry.as_ref().unwrap().payment_params.payee_pubkey, path.last().unwrap().pubkey, "Retry payee node_id should match last hop in path");
+                       if let Some(scid) = short_channel_id {
+                               assert!(retry.as_ref().unwrap().payment_params.previously_failed_channels.contains(&scid));
+                       }
 
                        #[cfg(test)]
                        {
@@ -1884,7 +1864,8 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
        expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash);
-       expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
+       let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect();
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations);
 
        pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash);
 }
@@ -1925,7 +1906,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
                                node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
                                commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node);
                                if !update_next_node {
-                                       expect_pending_htlcs_forwardable!(node);
+                                       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]);
                                }
                        }
                        let events = node.node.get_and_clear_pending_msg_events();