Rm ChannelMonitor merge capabilities in favor of explicit add/update
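
The old add_update_monitor entry point had to guess whether it was being
handed the initial monitor for a new channel or a newer copy of an existing
one to merge in. This splits it into an explicit add_monitor/update_monitor
pair, with updates delivered as compact ChannelMonitorUpdate objects carrying
a monotonically increasing update_id. As reflected in the test utilities
below, the trait surface is now roughly the following (a sketch of the shape
implied by this diff, not necessarily the full upstream definition):

    pub trait ManyChannelMonitor<ChanSigner: ChannelKeys>: Send + Sync {
        /// Register the initial monitor for a newly-funded channel.
        fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>)
            -> Result<(), ChannelMonitorUpdateErr>;
        /// Apply one incremental, ordered update to an existing monitor.
        fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate)
            -> Result<(), ChannelMonitorUpdateErr>;
        fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate>;
    }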
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 6b3649bcf0ecc34075dffaa86b00f4982387a5dd..a38aea2d4374a4a118709ee86d4c51e9580d006e 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -10,7 +10,7 @@ use ln::channelmonitor::HTLCUpdate;
 use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::events;
 use util::logger::{Logger, Level, Record};
-use util::ser::{ReadableArgs, Writer};
+use util::ser::{Readable, ReadableArgs, Writer, Writeable};
 
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::script::Script;
@@ -22,7 +22,7 @@ use secp256k1::{SecretKey, PublicKey};
 use std::time::{SystemTime, UNIX_EPOCH};
 use std::sync::{Arc,Mutex};
 use std::{mem};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 
 pub struct TestVecWriter(pub Vec<u8>);
 impl Writer for TestVecWriter {
@@ -44,45 +44,81 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
        }
 }
 
-pub struct TestChannelMonitor {
-       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor)>>,
-       pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
+pub struct TestChannelMonitor<'a> {
+       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>>,
+       pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
+       pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
 }
-impl TestChannelMonitor {
-       pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>, fee_estimator: Arc<chaininterface::FeeEstimator>) -> Self {
+impl<'a> TestChannelMonitor<'a> {
+       pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: Arc<Logger>, fee_estimator: Arc<chaininterface::FeeEstimator>) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
+                       latest_monitor_update_id: Mutex::new(HashMap::new()),
                        simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, fee_estimator),
                        update_ret: Mutex::new(Ok(())),
                }
        }
 }
-impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
-       fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+impl<'a> channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor<'a> {
+       fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
-               assert!(<(Sha256dHash, channelmonitor::ChannelMonitor)>::read(
+               assert!(<(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1 == monitor);
                w.0.clear();
                monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
                self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
-               assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
+               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
+               assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok());
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
-               return self.simple_monitor.fetch_pending_htlc_updated();
+       fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+               // Every monitor update should survive a serialization roundtrip
+               let mut w = TestVecWriter(Vec::new());
+               update.write(&mut w).unwrap();
+               assert!(channelmonitor::ChannelMonitorUpdate::read(
+                               &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
+
+               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
+               assert!(self.simple_monitor.update_monitor(funding_txo, update).is_ok());
+               // At every point where we get a monitor update, we should be able to send a useful monitor
+               // to a watchtower and disk...
+               let monitors = self.simple_monitor.monitors.lock().unwrap();
+               let monitor = monitors.get(&funding_txo).unwrap();
+               w.0.clear();
+               monitor.write_for_disk(&mut w).unwrap();
+               assert!(<(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
+                               &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1 == *monitor);
+               w.0.clear();
+               monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
+               self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
+               self.update_ret.lock().unwrap().clone()
+       }
+
+       fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
+               return self.simple_monitor.get_and_clear_pending_htlcs_updated();
        }
 }
 
 pub struct TestBroadcaster {
        pub txn_broadcasted: Mutex<Vec<Transaction>>,
+       pub broadcasted_txn: Mutex<HashSet<Sha256dHash>> // Temporary field while refactoring out tx duplication
 }
 impl chaininterface::BroadcasterInterface for TestBroadcaster {
        fn broadcast_transaction(&self, tx: &Transaction) {
+               {
+                       if let Some(_) = self.broadcasted_txn.lock().unwrap().get(&tx.txid()) {
+                               // Duplicate broadcasts of commitment/HTLC-timeout/HTLC-success txn are still ok
+                               if tx.input[0].sequence == 0xfffffffd {
+                                       return;
+                               }
+                       }
+               }
+               self.broadcasted_txn.lock().unwrap().insert(tx.txid());
                self.txn_broadcasted.lock().unwrap().push(tx.clone());
        }
 }
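
Both handlers above enforce that everything they are handed survives a
serialize/deserialize roundtrip before it is applied: add_monitor checks the
full monitor, update_monitor checks the update and then the resulting
monitor. A minimal self-contained sketch of that discipline, with a toy
Update type standing in for ChannelMonitorUpdate:

    use std::io::{Cursor, Read, Write};

    // Toy stand-in for ChannelMonitorUpdate: an update_id plus opaque payload.
    #[derive(PartialEq)]
    struct Update {
        update_id: u64,
        payload: Vec<u8>,
    }

    impl Update {
        fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
            w.write_all(&self.update_id.to_be_bytes())?;
            w.write_all(&(self.payload.len() as u64).to_be_bytes())?;
            w.write_all(&self.payload)
        }
        fn read<R: Read>(r: &mut R) -> std::io::Result<Update> {
            let mut buf = [0u8; 8];
            r.read_exact(&mut buf)?;
            let update_id = u64::from_be_bytes(buf);
            r.read_exact(&mut buf)?;
            let mut payload = vec![0u8; u64::from_be_bytes(buf) as usize];
            r.read_exact(&mut payload)?;
            Ok(Update { update_id, payload })
        }
    }

    fn main() {
        let update = Update { update_id: 7, payload: vec![1, 2, 3] };
        // Same discipline as update_monitor() above: serialize, read back,
        // and assert the roundtrip is lossless before applying anything.
        let mut w = Vec::new();
        update.write(&mut w).unwrap();
        assert!(Update::read(&mut Cursor::new(&w)).unwrap() == update);
    }

Note also that TestBroadcaster::broadcast_transaction locks broadcasted_txn
twice: the first guard goes out of scope before the second lock() call, so
re-locking std's non-reentrant Mutex cannot deadlock. The same scoped-lock
dedup pattern in isolation (toy [u8; 32] txids, not the rust-lightning types):

    use std::collections::HashSet;
    use std::sync::Mutex;

    struct DedupBroadcaster {
        seen: Mutex<HashSet<[u8; 32]>>, // txids we have already broadcast
        sent: Mutex<Vec<[u8; 32]>>,     // what actually went out
    }

    impl DedupBroadcaster {
        fn broadcast(&self, txid: [u8; 32], dup_ok: bool) {
            // The guard from the first lock() is a temporary dropped once the
            // condition is evaluated, so the re-locks below cannot deadlock.
            if dup_ok && self.seen.lock().unwrap().contains(&txid) {
                return; // tolerated duplicate: silently drop it
            }
            self.seen.lock().unwrap().insert(txid);
            self.sent.lock().unwrap().push(txid);
        }
    }

    fn main() {
        let b = DedupBroadcaster {
            seen: Mutex::new(HashSet::new()),
            sent: Mutex::new(Vec::new()),
        };
        b.broadcast([1; 32], false);
        b.broadcast([1; 32], true); // duplicate of a tolerated kind
        assert_eq!(b.sent.lock().unwrap().len(), 1);
    }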
@@ -154,6 +190,9 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
        fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
                Vec::new()
        }
+       fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
+               true
+       }
 }
 
 pub struct TestLogger {
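
The new should_request_full_sync hook lets the peer-handling code ask the
routing handler, per peer, whether to request a full routing table dump
rather than incremental gossip; the test handler always opts in. A toy
sketch of a caller consulting it (illustrative names, not the upstream peer
manager):

    // Toy model: the peer handler consults the routing handler once per
    // connected peer to pick between a full sync and incremental gossip.
    trait RoutingHandler {
        fn should_request_full_sync(&self, node_id: &[u8; 33]) -> bool;
    }

    struct TestRouting;
    impl RoutingHandler for TestRouting {
        // The test handler always requests a full sync, as in the diff above.
        fn should_request_full_sync(&self, _node_id: &[u8; 33]) -> bool { true }
    }

    fn initial_sync_mode<H: RoutingHandler>(h: &H, node_id: &[u8; 33]) -> &'static str {
        if h.should_request_full_sync(node_id) {
            "full routing table requested"
        } else {
            "incremental gossip only"
        }
    }

    fn main() {
        assert_eq!(initial_sync_mode(&TestRouting, &[2u8; 33]), "full routing table requested");
    }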
@@ -203,7 +242,9 @@ impl keysinterface::KeysInterface for TestKeysInterface {
        fn get_node_secret(&self) -> SecretKey { self.backing.get_node_secret() }
        fn get_destination_script(&self) -> Script { self.backing.get_destination_script() }
        fn get_shutdown_pubkey(&self) -> PublicKey { self.backing.get_shutdown_pubkey() }
-       fn get_channel_keys(&self, inbound: bool) -> EnforcingChannelKeys { EnforcingChannelKeys::new(self.backing.get_channel_keys(inbound)) }
+       fn get_channel_keys(&self, inbound: bool, channel_value_satoshis: u64) -> EnforcingChannelKeys {
+               EnforcingChannelKeys::new(self.backing.get_channel_keys(inbound, channel_value_satoshis))
+       }
 
        fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
                match *self.override_session_priv.lock().unwrap() {