Merge pull request #1843 from TheBlueMatt/2022-11-fix-bp-exit-docs
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 7a32b13e60da6fbaef3c734d813d912fada9f440..89bf27de2805ef8c152279cc02855d114d7a4e9b 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -7,24 +7,25 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use chain;
-use chain::WatchedOutput;
-use chain::chaininterface;
-use chain::chaininterface::ConfirmationTarget;
-use chain::chainmonitor;
-use chain::chainmonitor::MonitorUpdateId;
-use chain::channelmonitor;
-use chain::channelmonitor::MonitorEvent;
-use chain::transaction::OutPoint;
-use chain::keysinterface;
-use ln::features::{ChannelFeatures, InitFeatures};
-use ln::{msgs, wire};
-use ln::script::ShutdownScript;
-use routing::scoring::FixedPenaltyScorer;
-use util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
-use util::events;
-use util::logger::{Logger, Level, Record};
-use util::ser::{Readable, ReadableArgs, Writer, Writeable};
+use crate::chain;
+use crate::chain::WatchedOutput;
+use crate::chain::chaininterface;
+use crate::chain::chaininterface::ConfirmationTarget;
+use crate::chain::chainmonitor;
+use crate::chain::chainmonitor::MonitorUpdateId;
+use crate::chain::channelmonitor;
+use crate::chain::channelmonitor::MonitorEvent;
+use crate::chain::transaction::OutPoint;
+use crate::chain::keysinterface;
+use crate::ln::channelmanager;
+use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
+use crate::ln::{msgs, wire};
+use crate::ln::script::ShutdownScript;
+use crate::routing::scoring::FixedPenaltyScorer;
+use crate::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use crate::util::events;
+use crate::util::logger::{Logger, Level, Record};
+use crate::util::ser::{Readable, ReadableArgs, Writer, Writeable};
 
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
@@ -40,14 +41,14 @@ use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 
 use regex;
 
-use io;
-use prelude::*;
+use crate::io;
+use crate::prelude::*;
 use core::time::Duration;
-use sync::{Mutex, Arc};
+use crate::sync::{Mutex, Arc};
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use core::mem;
 use bitcoin::bech32::u5;
-use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial};
+use crate::chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial};
 
 #[cfg(feature = "std")]
 use std::time::{SystemTime, UNIX_EPOCH};
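
The two import hunks replace 2015-edition paths such as `use chain;` with `crate::`-anchored ones. Under the 2015 edition, `use` paths resolve from the crate root no matter which module they appear in; under 2018 and later they resolve relative to the current module (or an external crate), so a file like `util/test_utils.rs` has to name sibling top-level modules through `crate::`. A minimal stand-alone sketch of that rule, using hypothetical `chain_like`/`util_like` modules rather than anything from this crate:

    // main.rs, edition 2018 or later.
    mod chain_like {
        pub fn confirm() -> &'static str { "confirmed" }
    }

    mod util_like {
        // From inside a submodule, a 2015-style `use chain_like::confirm;`
        // no longer resolves; the path must be anchored at the crate root.
        use crate::chain_like::confirm;

        pub fn check() -> &'static str { confirm() }
    }

    fn main() {
        println!("{}", util_like::check());
    }
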
@@ -125,7 +126,7 @@ impl<'a> TestChainMonitor<'a> {
        }
 }
 impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
@@ -139,7 +140,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                self.chain_monitor.watch_channel(funding_txo, new_monitor)
        }
 
-       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
                // Every monitor update should survive roundtrip
                let mut w = TestVecWriter(Vec::new());
                update.write(&mut w).unwrap();
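
These two hunks change `chain::Watch::watch_channel` and `update_channel` to return `chain::ChannelMonitorUpdateStatus` directly instead of `Result<(), chain::ChannelMonitorUpdateErr>`. A hedged sketch of how calling code can branch on the new type, assuming the `Completed`, `InProgress` and `PermanentFailure` variants shipped with this API (the `describe_status` helper is purely illustrative):

    use lightning::chain::ChannelMonitorUpdateStatus;

    // Illustrative helper, not part of the diff: map the status onto the
    // outcome a caller would act on.
    fn describe_status(status: ChannelMonitorUpdateStatus) -> &'static str {
        match status {
            // Persistence finished synchronously; normal operation continues.
            ChannelMonitorUpdateStatus::Completed => "monitor persisted",
            // Persistence is still pending; the channel stays paused until
            // the asynchronous completion is signalled.
            ChannelMonitorUpdateStatus::InProgress => "persistence pending",
            // Unrecoverable persistence failure; the channel must be closed.
            ChannelMonitorUpdateStatus::PermanentFailure => "force-close required",
        }
    }

    fn main() {
        assert_eq!(describe_status(ChannelMonitorUpdateStatus::Completed), "monitor persisted");
    }
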
@@ -177,10 +178,10 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 }
 
 pub struct TestPersister {
-       pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+       pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
        /// If this is set to Some(), after the next return, we'll always return this until update_ret
        /// is changed:
-       pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
+       pub next_update_ret: Mutex<Option<chain::ChannelMonitorUpdateStatus>>,
        /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
        /// MonitorUpdateId here.
        pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
@@ -191,23 +192,23 @@ pub struct TestPersister {
 impl TestPersister {
        pub fn new() -> Self {
                Self {
-                       update_ret: Mutex::new(Ok(())),
+                       update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
                        next_update_ret: Mutex::new(None),
                        chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
                        offchain_monitor_updates: Mutex::new(HashMap::new()),
                }
        }
 
-       pub fn set_update_ret(&self, ret: Result<(), chain::ChannelMonitorUpdateErr>) {
+       pub fn set_update_ret(&self, ret: chain::ChannelMonitorUpdateStatus) {
                *self.update_ret.lock().unwrap() = ret;
        }
 
-       pub fn set_next_update_ret(&self, next_ret: Option<Result<(), chain::ChannelMonitorUpdateErr>>) {
+       pub fn set_next_update_ret(&self, next_ret: Option<chain::ChannelMonitorUpdateStatus>) {
                *self.next_update_ret.lock().unwrap() = next_ret;
        }
 }
 impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
                        *self.update_ret.lock().unwrap() = next_ret;
@@ -215,7 +216,7 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
                ret
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
                        *self.update_ret.lock().unwrap() = next_ret;
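
With `TestPersister` storing a bare `chain::ChannelMonitorUpdateStatus`, tests drive persistence outcomes by setting a status value rather than an `Ok`/`Err`. A rough in-crate sketch of that usage, assuming the status enum's `InProgress` variant from the same API (the surrounding test module is hypothetical):

    #[cfg(test)]
    mod persister_status_sketch {
        use crate::chain;
        use crate::util::test_utils::TestPersister;

        #[test]
        fn canned_persist_results() {
            let persister = TestPersister::new();
            // The next Persist call observes InProgress...
            persister.set_update_ret(chain::ChannelMonitorUpdateStatus::InProgress);
            // ...and every call after that falls back to Completed.
            persister.set_next_update_ret(Some(chain::ChannelMonitorUpdateStatus::Completed));
            assert!(matches!(*persister.update_ret.lock().unwrap(),
                chain::ChannelMonitorUpdateStatus::InProgress));
        }
    }
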
@@ -287,9 +288,9 @@ impl TestChannelMessageHandler {
 
 impl Drop for TestChannelMessageHandler {
        fn drop(&mut self) {
-               let l = self.expected_recv_msgs.lock().unwrap();
                #[cfg(feature = "std")]
                {
+                       let l = self.expected_recv_msgs.lock().unwrap();
                        if !std::thread::panicking() {
                                assert!(l.is_none() || l.as_ref().unwrap().is_empty());
                        }
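
The `Drop` hunk moves the lock acquisition inside the `#[cfg(feature = "std")]` block so no-std builds never bind a guard they cannot inspect. A stand-alone sketch of the same pattern with a hypothetical `ExpectedMsgs` handler (not from this file):

    use std::sync::Mutex;

    // Hypothetical stand-in for a test handler recording expected messages.
    struct ExpectedMsgs {
        expected: Mutex<Option<Vec<String>>>,
    }

    impl Drop for ExpectedMsgs {
        fn drop(&mut self) {
            // Bind the guard only where it is actually read; builds that
            // compile this block out never see an unused binding.
            #[cfg(feature = "std")]
            {
                let expected = self.expected.lock().unwrap();
                if !std::thread::panicking() {
                    assert!(expected.is_none() || expected.as_ref().unwrap().is_empty());
                }
            }
        }
    }

    fn main() {
        // With the "std" feature enabled, dropping with no leftover
        // expectations passes the assertion; without it, the check compiles away.
        drop(ExpectedMsgs { expected: Mutex::new(None) });
    }
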
@@ -350,13 +351,20 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
                self.received_msg(wire::Message::ChannelReestablish(msg.clone()));
        }
        fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {
+       fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) -> Result<(), ()> {
                // Don't bother with `received_msg` for Init as its auto-generated and we don't want to
                // bother re-generating the expected Init message in all tests.
+               Ok(())
        }
        fn handle_error(&self, _their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
                self.received_msg(wire::Message::Error(msg.clone()));
        }
+       fn provided_node_features(&self) -> NodeFeatures {
+               channelmanager::provided_node_features()
+       }
+       fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures {
+               channelmanager::provided_init_features()
+       }
 }
 
 impl events::MessageSendEventsProvider for TestChannelMessageHandler {
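
In this hunk `peer_connected` starts returning `Result<(), ()>`, giving message handlers a way to reject a peer at connect time, and the handler now reports its own `NodeFeatures`/`InitFeatures` via `channelmanager::provided_*_features()` instead of a global known-features set. A small sketch of the kind of check the new return type allows, built only from the feature accessors already used in this diff (the `accept_peer` helper is hypothetical):

    use lightning::ln::features::InitFeatures;

    // Hypothetical helper: the decision a handler's peer_connected() can now
    // express, with Err(()) signalling that the peer should be disconnected.
    fn accept_peer(their_features: &InitFeatures) -> Result<(), ()> {
        if their_features.supports_gossip_queries() { Ok(()) } else { Err(()) }
    }

    fn main() {
        let mut features = InitFeatures::empty();
        assert!(accept_peer(&features).is_err());
        features.set_gossip_queries_optional();
        assert!(accept_peer(&features).is_ok());
    }
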
@@ -377,7 +385,7 @@ fn get_dummy_channel_announcement(short_chan_id: u64) -> msgs::ChannelAnnounceme
        let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap();
        let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
        let unsigned_ann = msgs::UnsignedChannelAnnouncement {
-               features: ChannelFeatures::known(),
+               features: ChannelFeatures::empty(),
                chain_hash: genesis_block(network).header.block_hash(),
                short_channel_id: short_chan_id,
                node_id_1: PublicKey::from_secret_key(&secp_ctx, &node_1_privkey),
@@ -459,19 +467,17 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                None
        }
 
-       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
                if !init_msg.features.supports_gossip_queries() {
-                       return ();
+                       return Ok(());
                }
 
-               let should_request_full_sync = self.request_full_sync.load(Ordering::Acquire);
-
                #[allow(unused_mut, unused_assignments)]
                let mut gossip_start_time = 0;
                #[cfg(feature = "std")]
                {
                        gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
-                       if should_request_full_sync {
+                       if self.request_full_sync.load(Ordering::Acquire) {
                                gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
                        } else {
                                gossip_start_time -= 60 * 60; // an hour ago
@@ -487,6 +493,7 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                                timestamp_range: u32::max_value(),
                        },
                });
+               Ok(())
        }
 
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), msgs::LightningError> {
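
The routing-handler hunks inline the `request_full_sync` flag and return an explicit `Ok(())`. The flag only changes how far back the initial `gossip_timestamp_filter` reaches: 60 * 60 * 24 * 7 * 2 = 1,209,600 seconds (two weeks) for a full sync, 3,600 seconds (one hour) otherwise. A stand-alone sketch of that arithmetic, with a hypothetical `first_timestamp` helper:

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::time::{SystemTime, UNIX_EPOCH};

    // Hypothetical helper mirroring the test handler's choice of window start.
    fn first_timestamp(request_full_sync: &AtomicBool) -> u64 {
        let now = SystemTime::now().duration_since(UNIX_EPOCH)
            .expect("Time must be > 1970").as_secs();
        if request_full_sync.load(Ordering::Acquire) {
            now - 60 * 60 * 24 * 7 * 2 // two weeks of gossip history
        } else {
            now - 60 * 60 // only the last hour
        }
    }

    fn main() {
        assert_eq!(60 * 60 * 24 * 7 * 2, 1_209_600);
        // A full sync always asks for an earlier starting timestamp.
        assert!(first_timestamp(&AtomicBool::new(true)) < first_timestamp(&AtomicBool::new(false)));
    }
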
@@ -504,6 +511,18 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
        fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), msgs::LightningError> {
                Ok(())
        }
+
+       fn provided_node_features(&self) -> NodeFeatures {
+               let mut features = NodeFeatures::empty();
+               features.set_gossip_queries_optional();
+               features
+       }
+
+       fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures {
+               let mut features = InitFeatures::empty();
+               features.set_gossip_queries_optional();
+               features
+       }
 }
 
 impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
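
The routing handler now advertises exactly the bits it implements: `gossip_queries` set as optional in both its node and init feature sets. A short sketch exercising those bits through the public feature types and the accessors this diff already relies on:

    use lightning::ln::features::{InitFeatures, NodeFeatures};

    fn main() {
        // Mirrors provided_init_features(): start from empty and set only
        // the optional gossip_queries bit.
        let mut init = InitFeatures::empty();
        init.set_gossip_queries_optional();
        assert!(init.supports_gossip_queries());

        // Same idea for the node_announcement feature set.
        let mut node = NodeFeatures::empty();
        node.set_gossip_queries_optional();
        assert!(node.supports_gossip_queries());
    }
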
@@ -589,6 +608,9 @@ impl keysinterface::KeysInterface for TestKeysInterface {
        fn get_node_secret(&self, recipient: Recipient) -> Result<SecretKey, ()> {
                self.backing.get_node_secret(recipient)
        }
+       fn get_node_id(&self, recipient: Recipient) -> Result<PublicKey, ()> {
+               self.backing.get_node_id(recipient)
+       }
        fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
                self.backing.ecdh(recipient, other_key, tweak)
        }