X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Futil%2Ftest_utils.rs;h=e9a7f0ebfca856cfb3a88aec9c32fad4e3f50507;hb=a41954d841ba58537043ddbc98e9cdb410004306;hp=785b40befd472ba10b30fb618bd75eb9ab20c15a;hpb=9ee9809547f12d4f6c2487e0afb945c30ad4aafb;p=rust-lightning

diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 785b40be..e9a7f0eb 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -13,7 +13,7 @@ use crate::chain::chaininterface;
 use crate::chain::chaininterface::ConfirmationTarget;
 use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
 use crate::chain::chainmonitor;
-use crate::chain::chainmonitor::MonitorUpdateId;
+use crate::chain::chainmonitor::{MonitorUpdateId, UpdateOrigin};
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::MonitorEvent;
 use crate::chain::transaction::OutPoint;
@@ -40,16 +40,15 @@ use crate::util::logger::{Logger, Level, Record};
 use crate::util::ser::{Readable, ReadableArgs, Writer, Writeable};
 use crate::util::persist::KVStore;
 
-use bitcoin::EcdsaSighashType;
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
-use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::blockdata::script::{Builder, Script, ScriptBuf};
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::block::Block;
 use bitcoin::network::constants::Network;
 use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::util::sighash::SighashCache;
+use bitcoin::sighash::{SighashCache, EcdsaSighashType};
 
 use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey};
 use bitcoin::secp256k1::ecdh::SharedSecret;
@@ -124,6 +123,7 @@ impl<'a> Router for TestRouter<'a> {
 		if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
 			assert_eq!(find_route_query, *params);
 			if let Ok(ref route) = find_route_res {
+				assert_eq!(route.route_params, Some(find_route_query));
 				let scorer = self.scorer.read().unwrap();
 				let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
 				for path in &route.paths {
@@ -139,10 +139,10 @@ impl<'a> Router for TestRouter<'a> {
 						// Since the path is reversed, the last element in our iteration is the first
 						// hop.
 						if idx == path.hops.len() - 1 {
-							scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &());
+							scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
 						} else {
 							let curr_hop_path_idx = path.hops.len() - 1 - idx;
-							scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &());
+							scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
 						}
 					}
 				}
@@ -152,7 +152,7 @@ impl<'a> Router for TestRouter<'a> {
 		let logger = TestLogger::new();
 		find_route(
 			payer, params, &self.network_graph, first_hops, &logger,
-			&ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &(),
+			&ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &Default::default(),
 			&[42; 32]
 		)
 	}
@@ -192,7 +192,7 @@ impl SignerProvider for OnlyReadsKeysInterface {
 		))
 	}
 
-	fn get_destination_script(&self) -> Result<Script, ()> { Err(()) }
+	fn get_destination_script(&self) -> Result<ScriptBuf, ()> { Err(()) }
 
 	fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> { Err(()) }
 }
@@ -206,6 +206,9 @@ pub struct TestChainMonitor<'a> {
 	/// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
 	/// boolean.
 	pub expect_channel_force_closed: Mutex>,
+	/// If this is set to Some(), the next round trip serialization check will not hold after an
+	/// update_channel call (not watch_channel) for the given channel_id.
+	pub expect_monitor_round_trip_fail: Mutex>,
 }
 impl<'a> TestChainMonitor<'a> {
 	pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist, keys_manager: &'a TestKeysInterface) -> Self {
@@ -216,6 +219,7 @@ impl<'a> TestChainMonitor<'a> {
 			chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister),
 			keys_manager,
 			expect_channel_force_closed: Mutex::new(None),
+			expect_monitor_round_trip_fail: Mutex::new(None),
 		}
 	}
 
@@ -225,7 +229,7 @@ impl<'a> TestChainMonitor<'a> {
 	}
 }
 
 impl<'a> chain::Watch for TestChainMonitor<'a> {
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> chain::ChannelMonitorUpdateStatus {
+	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
 		// At every point where we get a monitor update, we should be able to send a useful monitor
 		// to a watchtower and disk...
 		let mut w = TestVecWriter(Vec::new());
@@ -266,7 +270,12 @@ impl<'a> chain::Watch for TestChainMonitor<'a> {
 		monitor.write(&mut w).unwrap();
 		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read(
 			&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
-		assert!(new_monitor == *monitor);
+		if let Some(chan_id) = self.expect_monitor_round_trip_fail.lock().unwrap().take() {
+			assert_eq!(chan_id, funding_txo.to_channel_id());
+			assert!(new_monitor != *monitor);
+		} else {
+			assert!(new_monitor == *monitor);
+		}
 		self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
 		update_res
 	}
@@ -292,12 +301,12 @@ pub(crate) struct WatchtowerPersister {
 	/// After receiving a revoke_and_ack for a commitment number, we'll form and store the justice
 	/// tx which would be used to provide a watchtower with the data it needs.
 	watchtower_state: Mutex>>,
-	destination_script: Script,
+	destination_script: ScriptBuf,
 }
 
 impl WatchtowerPersister {
 	#[cfg(test)]
-	pub(crate) fn new(destination_script: Script) -> Self {
+	pub(crate) fn new(destination_script: ScriptBuf) -> Self {
 		WatchtowerPersister {
 			persister: TestPersister::new(),
 			unsigned_justice_tx_data: Mutex::new(HashMap::new()),
@@ -413,12 +422,13 @@ impl chainmonitor::Persist fo
 		chain::ChannelMonitorUpdateStatus::Completed
 	}
 
-	fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
 		let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
 		if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
 			ret = update_ret;
 		}
-		if update.is_none() {
+		let is_chain_sync = if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false };
+		if is_chain_sync {
 			self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
 		} else {
 			self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
@@ -440,12 +450,12 @@ impl TestStore {
 }
 
 impl KVStore for TestStore {
-	fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
+	fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
 		let persisted_lock = self.persisted_bytes.lock().unwrap();
-		let prefixed = if sub_namespace.is_empty() {
-			namespace.to_string()
+		let prefixed = if secondary_namespace.is_empty() {
+			primary_namespace.to_string()
 		} else {
-			format!("{}/{}", namespace, sub_namespace)
+			format!("{}/{}", primary_namespace, secondary_namespace)
 		};
 
 		if let Some(outer_ref) = persisted_lock.get(&prefixed) {
@@ -460,7 +470,7 @@ impl KVStore for TestStore {
 		}
 	}
 
-	fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
+	fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
 		if self.read_only {
 			return Err(io::Error::new(
 				io::ErrorKind::PermissionDenied,
@@ -469,10 +479,10 @@ impl KVStore for TestStore {
 		}
 
 		let mut persisted_lock = self.persisted_bytes.lock().unwrap();
-		let prefixed = if sub_namespace.is_empty() {
-			namespace.to_string()
+		let prefixed = if secondary_namespace.is_empty() {
+			primary_namespace.to_string()
 		} else {
-			format!("{}/{}", namespace, sub_namespace)
+			format!("{}/{}", primary_namespace, secondary_namespace)
 		};
 		let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
 		let mut bytes = Vec::new();
@@ -481,7 +491,7 @@ impl KVStore for TestStore {
 		Ok(())
 	}
 
-	fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
+	fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
 		if self.read_only {
 			return Err(io::Error::new(
 				io::ErrorKind::PermissionDenied,
@@ -491,10 +501,10 @@ impl KVStore for TestStore {
 
 		let mut persisted_lock = self.persisted_bytes.lock().unwrap();
 
-		let prefixed = if sub_namespace.is_empty() {
-			namespace.to_string()
+		let prefixed = if secondary_namespace.is_empty() {
+			primary_namespace.to_string()
 		} else {
-			format!("{}/{}", namespace, sub_namespace)
+			format!("{}/{}", primary_namespace, secondary_namespace)
 		};
 		if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) {
 			outer_ref.remove(&key.to_string());
@@ -503,13 +513,13 @@ impl KVStore for TestStore {
 		Ok(())
 	}
 
-	fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
+	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
 		let mut persisted_lock = self.persisted_bytes.lock().unwrap();
 
-		let prefixed = if sub_namespace.is_empty() {
-			namespace.to_string()
+		let prefixed = if secondary_namespace.is_empty() {
+			primary_namespace.to_string()
 		} else {
-			format!("{}/{}", namespace, sub_namespace)
+			format!("{}/{}", primary_namespace, secondary_namespace)
 		};
 		match persisted_lock.entry(prefixed) {
 			hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()),
@@ -550,9 +560,9 @@ impl TestBroadcaster {
 impl chaininterface::BroadcasterInterface for TestBroadcaster {
 	fn broadcast_transactions(&self, txs: &[&Transaction]) {
 		for tx in txs {
-			let lock_time = tx.lock_time.0;
+			let lock_time = tx.lock_time.to_consensus_u32();
 			assert!(lock_time < 1_500_000_000);
-			if bitcoin::LockTime::from(tx.lock_time).is_block_height() && lock_time > self.blocks.lock().unwrap().last().unwrap().1 {
+			if tx.lock_time.is_block_height() && lock_time > self.blocks.lock().unwrap().last().unwrap().1 {
 				for inp in tx.input.iter() {
 					if inp.sequence != Sequence::MAX {
 						panic!("We should never broadcast a transaction before its locktime ({})!", tx.lock_time);
@@ -570,17 +580,17 @@ pub struct TestChannelMessageHandler {
 	expected_recv_msgs: Mutex>>>,
 	connected_peers: Mutex>,
 	pub message_fetch_counter: AtomicUsize,
-	genesis_hash: ChainHash,
+	chain_hash: ChainHash,
 }
 
 impl TestChannelMessageHandler {
-	pub fn new(genesis_hash: ChainHash) -> Self {
+	pub fn new(chain_hash: ChainHash) -> Self {
 		TestChannelMessageHandler {
 			pending_events: Mutex::new(Vec::new()),
 			expected_recv_msgs: Mutex::new(None),
 			connected_peers: Mutex::new(HashSet::new()),
 			message_fetch_counter: AtomicUsize::new(0),
-			genesis_hash,
+			chain_hash,
 		}
 	}
 
@@ -635,6 +645,18 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 	fn handle_closing_signed(&self, _their_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
 		self.received_msg(wire::Message::ClosingSigned(msg.clone()));
 	}
+	fn handle_stfu(&self, _their_node_id: &PublicKey, msg: &msgs::Stfu) {
+		self.received_msg(wire::Message::Stfu(msg.clone()));
+	}
+	fn handle_splice(&self, _their_node_id: &PublicKey, msg: &msgs::Splice) {
+		self.received_msg(wire::Message::Splice(msg.clone()));
+	}
+	fn handle_splice_ack(&self, _their_node_id: &PublicKey, msg: &msgs::SpliceAck) {
+		self.received_msg(wire::Message::SpliceAck(msg.clone()));
+	}
+	fn handle_splice_locked(&self, _their_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
+		self.received_msg(wire::Message::SpliceLocked(msg.clone()));
+	}
 	fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
 		self.received_msg(wire::Message::UpdateAddHTLC(msg.clone()));
 	}
@@ -684,8 +706,8 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 		channelmanager::provided_init_features(&UserConfig::default())
 	}
 
-	fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
-		Some(vec![self.genesis_hash])
+	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
+		Some(vec![self.chain_hash])
 	}
 
 	fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
@@ -753,7 +775,7 @@ fn get_dummy_channel_announcement(short_chan_id: u64) -> msgs::ChannelAnnounceme
 	let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
 	let unsigned_ann = msgs::UnsignedChannelAnnouncement {
 		features: ChannelFeatures::empty(),
-		chain_hash: genesis_block(network).header.block_hash(),
+		chain_hash: ChainHash::using_genesis_block(network),
 		short_channel_id: short_chan_id,
 		node_id_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_privkey)),
 		node_id_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_privkey)),
@@ -779,7 +801,7 @@ fn get_dummy_channel_update(short_chan_id: u64) -> msgs::ChannelUpdate {
 	msgs::ChannelUpdate {
 		signature: Signature::from(unsafe { FFISignature::new() }),
 		contents: msgs::UnsignedChannelUpdate {
-			chain_hash: genesis_block(network).header.block_hash(),
+			chain_hash: ChainHash::using_genesis_block(network),
 			short_channel_id: short_chan_id,
 			timestamp: 0,
 			flags: 0,
@@ -855,7 +877,7 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
 		pending_events.push(events::MessageSendEvent::SendGossipTimestampFilter {
 			node_id: their_node_id.clone(),
 			msg: msgs::GossipTimestampFilter {
-				chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+				chain_hash: ChainHash::using_genesis_block(Network::Testnet),
 				first_timestamp: gossip_start_time as u32,
 				timestamp_range: u32::max_value(),
 			},
@@ -958,8 +980,10 @@ impl Logger for TestLogger {
 	fn log(&self, record: &Record) {
 		*self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
 		if record.level >= self.level {
-			#[cfg(all(not(ldk_bench), feature = "std"))]
-			println!("{:<5} {} [{} : {}, {}] {}", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args);
+			#[cfg(all(not(ldk_bench), feature = "std"))] {
+				let pfx = format!("{} {} [{}:{}]", self.id, record.level.to_string(), record.module_path, record.line);
+				println!("{:<55}{}", pfx, record.args);
+			}
 		}
 	}
 }
@@ -1097,7 +1121,7 @@ impl SignerProvider for TestKeysInterface {
 		))
 	}
 
-	fn get_destination_script(&self) -> Result<Script, ()> { self.backing.get_destination_script() }
+	fn get_destination_script(&self) -> Result<ScriptBuf, ()> { self.backing.get_destination_script() }
 
 	fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
 		match &mut *self.expectations.lock().unwrap() {
@@ -1184,18 +1208,18 @@ impl core::fmt::Debug for OnGetShutdownScriptpubkey {
 }
 
 pub struct TestChainSource {
-	pub genesis_hash: BlockHash,
+	pub chain_hash: ChainHash,
 	pub utxo_ret: Mutex,
 	pub get_utxo_call_count: AtomicUsize,
-	pub watched_txn: Mutex>,
-	pub watched_outputs: Mutex>,
+	pub watched_txn: Mutex>,
+	pub watched_outputs: Mutex>,
 }
 
 impl TestChainSource {
 	pub fn new(network: Network) -> Self {
 		let script_pubkey = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
 		Self {
-			genesis_hash: genesis_block(network).block_hash(),
+			chain_hash: ChainHash::using_genesis_block(network),
 			utxo_ret: Mutex::new(UtxoResult::Sync(Ok(TxOut { value: u64::max_value(), script_pubkey }))),
 			get_utxo_call_count: AtomicUsize::new(0),
 			watched_txn: Mutex::new(HashSet::new()),
@@ -1205,9 +1229,9 @@ impl TestChainSource {
 }
 
 impl UtxoLookup for TestChainSource {
-	fn get_utxo(&self, genesis_hash: &BlockHash, _short_channel_id: u64) -> UtxoResult {
+	fn get_utxo(&self, chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult {
 		self.get_utxo_call_count.fetch_add(1, Ordering::Relaxed);
-		if self.genesis_hash != *genesis_hash {
+		if self.chain_hash != *chain_hash {
 			return UtxoResult::Sync(Err(UtxoLookupError::UnknownChain));
 		}
 
@@ -1217,7 +1241,7 @@ impl UtxoLookup for TestChainSource {
 
 impl chain::Filter for TestChainSource {
 	fn register_tx(&self, txid: &Txid, script_pubkey: &Script) {
-		self.watched_txn.lock().unwrap().insert((*txid, script_pubkey.clone()));
+		self.watched_txn.lock().unwrap().insert((*txid, script_pubkey.into()));
 	}
 
 	fn register_output(&self, output: WatchedOutput) {
@@ -1337,9 +1361,9 @@ impl WalletSource for TestWalletSource {
 		Ok(self.utxos.borrow().clone())
 	}
 
-	fn get_change_script(&self) -> Result<Script, ()> {
+	fn get_change_script(&self) -> Result<ScriptBuf, ()> {
 		let public_key = bitcoin::PublicKey::new(self.secret_key.public_key(&self.secp));
-		Ok(Script::new_p2pkh(&public_key.pubkey_hash()))
+		Ok(ScriptBuf::new_p2pkh(&public_key.pubkey_hash()))
 	}
 
 	fn sign_tx(&self, mut tx: Transaction) -> Result<Transaction, ()> {
@@ -1349,10 +1373,10 @@ impl WalletSource for TestWalletSource {
 			let sighash = SighashCache::new(&tx)
 				.legacy_signature_hash(i, &utxo.output.script_pubkey, EcdsaSighashType::All as u32)
 				.map_err(|_| ())?;
-			let sig = self.secp.sign_ecdsa(&sighash.as_hash().into(), &self.secret_key);
-			let bitcoin_sig = bitcoin::EcdsaSig { sig, hash_ty: EcdsaSighashType::All }.to_vec();
+			let sig = self.secp.sign_ecdsa(&(*sighash.as_raw_hash()).into(), &self.secret_key);
+			let bitcoin_sig = bitcoin::ecdsa::Signature { sig, hash_ty: EcdsaSighashType::All };
 			tx.input[i].script_sig = Builder::new()
-				.push_slice(&bitcoin_sig)
+				.push_slice(&bitcoin_sig.serialize())
 				.push_slice(&self.secret_key.public_key(&self.secp).serialize())
 				.into_script();
 		}
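
For illustration only (not part of the diff above): all four KVStore methods in TestStore combine the renamed primary_namespace/secondary_namespace arguments into a single map key the same way, namely the primary namespace alone when the secondary one is empty, otherwise the two joined with a slash. A minimal standalone sketch of that convention follows; the helper name prefixed_namespace is ours, not an LDK API.

    // Mirrors the key-prefixing used in TestStore's read/write/remove/list above.
    fn prefixed_namespace(primary_namespace: &str, secondary_namespace: &str) -> String {
        if secondary_namespace.is_empty() {
            primary_namespace.to_string()
        } else {
            format!("{}/{}", primary_namespace, secondary_namespace)
        }
    }

    fn main() {
        // Hypothetical namespace values, used only to show the two cases.
        assert_eq!(prefixed_namespace("monitors", ""), "monitors");
        assert_eq!(prefixed_namespace("monitors", "archived"), "monitors/archived");
    }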