Merge pull request #1146 from jkczyz/2021-10-score-serialization
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index fed16768508f89a1e56ea951bf1839e9b6f53197..4734a1cb459da27f08101be0ef5e3dd2b46e55bd 100644
@@ -12,6 +12,7 @@ use chain::WatchedOutput;
 use chain::chaininterface;
 use chain::chaininterface::ConfirmationTarget;
 use chain::chainmonitor;
+use chain::chainmonitor::MonitorUpdateId;
 use chain::channelmonitor;
 use chain::channelmonitor::MonitorEvent;
 use chain::transaction::OutPoint;
@@ -20,6 +21,7 @@ use ln::features::{ChannelFeatures, InitFeatures};
 use ln::msgs;
 use ln::msgs::OptionalField;
 use ln::script::ShutdownScript;
+use routing::scorer::{Eternity, ScorerUsingTime};
 use util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
 use util::events;
 use util::logger::{Logger, Level, Record};
@@ -52,9 +54,6 @@ impl Writer for TestVecWriter {
                self.0.extend_from_slice(buf);
                Ok(())
        }
-       fn size_hint(&mut self, size: usize) {
-               self.0.reserve_exact(size);
-       }
 }
 
 pub struct TestFeeEstimator {
@@ -76,41 +75,42 @@ impl keysinterface::KeysInterface for OnlyReadsKeysInterface {
        fn get_channel_signer(&self, _inbound: bool, _channel_value_satoshis: u64) -> EnforcingSigner { unreachable!(); }
        fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
 
-       fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
-               EnforcingSigner::read(&mut io::Cursor::new(reader))
+       fn read_chan_signer(&self, mut reader: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
+               let inner: InMemorySigner = Readable::read(&mut reader)?;
+               let state = Arc::new(Mutex::new(EnforcementState::new()));
+
+               Ok(EnforcingSigner::new_with_revoked(
+                       inner,
+                       state,
+                       false
+               ))
        }
        fn sign_invoice(&self, _invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> { unreachable!(); }
 }
 
 pub struct TestChainMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingSigner>)>>,
-       pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
-       pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a channelmonitor::Persist<EnforcingSigner>>,
+       pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64, MonitorUpdateId)>>,
+       pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<EnforcingSigner>>,
        pub keys_manager: &'a TestKeysInterface,
-       pub update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
-       /// If this is set to Some(), after the next return, we'll always return this until update_ret
-       /// is changed:
-       pub next_update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
        /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
        /// boolean.
        pub expect_channel_force_closed: Mutex<Option<([u8; 32], bool)>>,
 }
 impl<'a> TestChainMonitor<'a> {
-       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a channelmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        latest_monitor_update_id: Mutex::new(HashMap::new()),
                        chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister),
                        keys_manager,
-                       update_ret: Mutex::new(None),
-                       next_update_ret: Mutex::new(None),
                        expect_channel_force_closed: Mutex::new(None),
                }
        }
 }
 impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
@@ -118,22 +118,13 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
                        &mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
                assert!(new_monitor == monitor);
-               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
+               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+                       (funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
                self.added_monitors.lock().unwrap().push((funding_txo, monitor));
-               let watch_res = self.chain_monitor.watch_channel(funding_txo, new_monitor);
-
-               let ret = self.update_ret.lock().unwrap().clone();
-               if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
-                       *self.update_ret.lock().unwrap() = Some(next_ret);
-               }
-               if ret.is_some() {
-                       assert!(watch_res.is_ok());
-                       return ret.unwrap();
-               }
-               watch_res
+               self.chain_monitor.watch_channel(funding_txo, new_monitor)
        }
 
-       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
                // Every monitor update should survive roundtrip
                let mut w = TestVecWriter(Vec::new());
                update.write(&mut w).unwrap();
@@ -148,27 +139,18 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                        } else { panic!(); }
                }
 
-               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
+               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+                       (funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(&update)));
                let update_res = self.chain_monitor.update_channel(funding_txo, update);
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
-               let monitors = self.chain_monitor.monitors.read().unwrap();
-               let monitor = monitors.get(&funding_txo).unwrap();
+               let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap();
                w.0.clear();
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
                        &mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
                assert!(new_monitor == *monitor);
                self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
-
-               let ret = self.update_ret.lock().unwrap().clone();
-               if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
-                       *self.update_ret.lock().unwrap() = Some(next_ret);
-               }
-               if ret.is_some() {
-                       assert!(update_res.is_ok());
-                       return ret.unwrap();
-               }
                update_res
        }
 
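Note on the two hunks above: with the update_ret/next_update_ret knobs removed from TestChainMonitor, latest_monitor_update_id now also records the MonitorUpdateId derived from each new monitor or update, i.e. the id the ChainMonitor hands to its persister. A test that injected a temporary persistence failure can feed that id back to signal completion. A rough sketch only, assuming the channel_monitor_updated method from the async-persistence API, plus a test-local channel_id and a TestChainMonitor named chain_monitor:

    // Hypothetical test snippet: fetch the persister-visible id recorded for the
    // latest update on `channel_id`, then tell the ChainMonitor that the
    // previously failed persistence has now completed.
    let (funding_txo, _update_id, persist_id) = chain_monitor.latest_monitor_update_id
        .lock().unwrap().get(&channel_id).unwrap().clone();
    let _ = chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, persist_id);
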
@@ -178,26 +160,49 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 }
 
 pub struct TestPersister {
-       pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>
+       pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+       /// If this is set to Some(), after the next return, we'll always return this until update_ret
+       /// is changed:
+       pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
+       /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
+       /// MonitorUpdateId here.
+       pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
 }
 impl TestPersister {
        pub fn new() -> Self {
                Self {
-                       update_ret: Mutex::new(Ok(()))
+                       update_ret: Mutex::new(Ok(())),
+                       next_update_ret: Mutex::new(None),
+                       chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
                }
        }
 
-       pub fn set_update_ret(&self, ret: Result<(), channelmonitor::ChannelMonitorUpdateErr>) {
+       pub fn set_update_ret(&self, ret: Result<(), chain::ChannelMonitorUpdateErr>) {
                *self.update_ret.lock().unwrap() = ret;
        }
+
+       pub fn set_next_update_ret(&self, next_ret: Option<Result<(), chain::ChannelMonitorUpdateErr>>) {
+               *self.next_update_ret.lock().unwrap() = next_ret;
+       }
 }
-impl<Signer: keysinterface::Sign> channelmonitor::Persist<Signer> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
-               self.update_ret.lock().unwrap().clone()
+impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+               let ret = self.update_ret.lock().unwrap().clone();
+               if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
+                       *self.update_ret.lock().unwrap() = next_ret;
+               }
+               ret
        }
 
-       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &channelmonitor::ChannelMonitorUpdate, _data: &channelmonitor::ChannelMonitor<Signer>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
-               self.update_ret.lock().unwrap().clone()
+       fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+               let ret = self.update_ret.lock().unwrap().clone();
+               if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
+                       *self.update_ret.lock().unwrap() = next_ret;
+               }
+               if update.is_none() {
+                       self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+               }
+               ret
        }
 }
 
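The failure-injection state that used to live on TestChainMonitor now lives on TestPersister, alongside a map of chain-sync persistences (update_persisted_channel calls that carry no ChannelMonitorUpdate). A hedged sketch of how a test might drive it, with funding_txo standing in for the channel's funding outpoint:

    // Hypothetical test snippet: fail the next persistence call once, then let
    // subsequent calls succeed again.
    let persister = TestPersister::new();
    persister.set_update_ret(Err(chain::ChannelMonitorUpdateErr::TemporaryFailure));
    persister.set_next_update_ret(Some(Ok(())));
    // ... build a TestChainMonitor around `&persister` and run the scenario ...
    // Then check that at least one block-driven (update-less) persistence was
    // requested for the channel:
    let sync_persists = persister.chain_sync_monitor_persistences.lock().unwrap();
    assert!(sync_persists.get(&funding_txo).map_or(false, |ids| !ids.is_empty()));
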
@@ -316,7 +321,6 @@ fn get_dummy_channel_update(short_chan_id: u64) -> msgs::ChannelUpdate {
 pub struct TestRoutingMessageHandler {
        pub chan_upds_recvd: AtomicUsize,
        pub chan_anns_recvd: AtomicUsize,
-       pub chan_anns_sent: AtomicUsize,
        pub request_full_sync: AtomicBool,
 }
 
@@ -325,7 +329,6 @@ impl TestRoutingMessageHandler {
                TestRoutingMessageHandler {
                        chan_upds_recvd: AtomicUsize::new(0),
                        chan_anns_recvd: AtomicUsize::new(0),
-                       chan_anns_sent: AtomicUsize::new(0),
                        request_full_sync: AtomicBool::new(false),
                }
        }
@@ -342,11 +345,10 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                self.chan_upds_recvd.fetch_add(1, Ordering::AcqRel);
                Err(msgs::LightningError { err: "".to_owned(), action: msgs::ErrorAction::IgnoreError })
        }
-       fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
        fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
                let mut chan_anns = Vec::new();
-               const TOTAL_UPDS: u64 = 100;
-               let end: u64 = cmp::min(starting_point + batch_amount as u64, TOTAL_UPDS - self.chan_anns_sent.load(Ordering::Acquire) as u64);
+               const TOTAL_UPDS: u64 = 50;
+               let end: u64 = cmp::min(starting_point + batch_amount as u64, TOTAL_UPDS);
                for i in starting_point..end {
                        let chan_upd_1 = get_dummy_channel_update(i);
                        let chan_upd_2 = get_dummy_channel_update(i);
@@ -355,7 +357,6 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                        chan_anns.push((chan_ann, Some(chan_upd_1), Some(chan_upd_2)));
                }
 
-               self.chan_anns_sent.fetch_add(chan_anns.len(), Ordering::AcqRel);
                chan_anns
        }
 
@@ -499,11 +500,11 @@ impl keysinterface::KeysInterface for TestKeysInterface {
                let inner: InMemorySigner = Readable::read(&mut reader)?;
                let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-               Ok(EnforcingSigner {
+               Ok(EnforcingSigner::new_with_revoked(
                        inner,
                        state,
-                       disable_revocation_policy_check: self.disable_revocation_policy_check,
-               })
+                       self.disable_revocation_policy_check
+               ))
        }
 
        fn sign_invoice(&self, invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> {
@@ -690,3 +691,6 @@ impl core::fmt::Debug for OnRegisterOutput {
                        .finish()
        }
 }
+
+/// A scorer useful in testing, when the passage of time isn't a concern.
+pub type TestScorer = ScorerUsingTime<Eternity>;
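
The TestScorer alias fixes the scorer's notion of time to Eternity, so failure penalties applied during a test never decay and scoring stays deterministic across the run. A minimal sketch, assuming ScorerUsingTime keeps a Default implementation built from the stock ScoringParameters:

    // Hypothetical: a deterministic scorer for router tests; with Eternity as
    // the Time parameter, penalties do not decay over the course of a test.
    let scorer = TestScorer::default();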