diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index af0c64d8..423e69eb 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -18,56 +18,72 @@
 //! send-side handling is correct, other peers. We consider it a failure if any action results in a
 //! channel being force-closed.
 
+use bitcoin::amount::Amount;
 use bitcoin::blockdata::constants::genesis_block;
-use bitcoin::blockdata::transaction::{Transaction, TxOut};
-use bitcoin::blockdata::script::{Builder, ScriptBuf};
-use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::locktime::absolute::LockTime;
-use bitcoin::network::constants::Network;
+use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::script::{Builder, ScriptBuf};
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::network::Network;
+use bitcoin::transaction::Version;
 
-use bitcoin::hashes::Hash as TraitImport;
+use bitcoin::hash_types::BlockHash;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
-use bitcoin::hash_types::{BlockHash, WPubkeyHash};
+use bitcoin::hashes::Hash as TraitImport;
+use bitcoin::WPubkeyHash;
 
+use lightning::blinded_path::payment::ReceiveTlvs;
+use lightning::blinded_path::BlindedPath;
 use lightning::chain;
-use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
-use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
+use lightning::chain::{
+	chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch,
+};
 use lightning::events;
 use lightning::events::MessageSendEventsProvider;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
-use lightning::ln::script::ShutdownScript;
+use lightning::ln::channel_state::ChannelDetails;
+use lightning::ln::channelmanager::{
+	ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure,
+	RecipientOnionFields,
+};
 use lightning::ln::functional_test_utils::*;
-use lightning::offers::invoice::UnsignedBolt12Invoice;
+use lightning::ln::msgs::{
+	self, ChannelMessageHandler, CommitmentUpdate, DecodeError, Init, UpdateAddHTLC,
+};
+use lightning::ln::script::ShutdownScript;
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
+use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
+use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RouteParameters, Router};
+use lightning::sign::{
+	EntropySource, InMemorySigner, KeyMaterial, NodeSigner, Recipient, SignerProvider,
+};
+use lightning::util::config::UserConfig;
 use lightning::util::errors::APIError;
+use lightning::util::hash_tables::*;
 use lightning::util::logger::Logger;
-use lightning::util::config::UserConfig;
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RouteParameters, Router};
+use lightning::util::test_channel_signer::{EnforcementState, TestChannelSigner};
 
 use crate::utils::test_logger::{self, Output};
 use crate::utils::test_persister::TestPersister;
 
-use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
 use bitcoin::secp256k1::schnorr;
+use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey};
 
-use std::mem;
+use bech32::u5;
 use std::cmp::{self, Ordering};
-use hashbrown::{HashSet, hash_map, HashMap};
-use std::sync::{Arc,Mutex};
-use std::sync::atomic;
 use std::io::Cursor;
-use bitcoin::bech32::u5;
+use std::mem;
+use std::sync::atomic;
+use std::sync::{Arc, Mutex};
 
 const MAX_FEE: u32 = 10_000;
 struct FuzzEstimator {
@@ -81,8 +97,14 @@ impl FeeEstimator for FuzzEstimator {
 		// Background feerate which is <= the minimum Normal feerate.
 		match conf_target {
 			ConfirmationTarget::OnChainSweep => MAX_FEE,
-			ConfirmationTarget::ChannelCloseMinimum|ConfirmationTarget::AnchorChannelFee|ConfirmationTarget::MinAllowedAnchorChannelRemoteFee|ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 253,
-			ConfirmationTarget::NonAnchorChannelFee => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
+			ConfirmationTarget::ChannelCloseMinimum
+			| ConfirmationTarget::AnchorChannelFee
+			| ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
+			| ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
+			| ConfirmationTarget::OutputSpendingFee => 253,
+			ConfirmationTarget::NonAnchorChannelFee => {
+				cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE)
+			},
 		}
 	}
 }
@@ -91,19 +113,40 @@ struct FuzzRouter {}
 
 impl Router for FuzzRouter {
 	fn find_route(
-		&self, _payer: &PublicKey, _params: &RouteParameters, _first_hops: Option<&[&ChannelDetails]>,
-		_inflight_htlcs: InFlightHtlcs
+		&self, _payer: &PublicKey, _params: &RouteParameters,
+		_first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs,
 	) -> Result<Route, msgs::LightningError> {
 		Err(msgs::LightningError {
 			err: String::from("Not implemented"),
-			action: msgs::ErrorAction::IgnoreError
+			action: msgs::ErrorAction::IgnoreError,
 		})
 	}
+
+	fn create_blinded_payment_paths<T: secp256k1::Signing + secp256k1::Verification>(
+		&self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+		_amount_msats: u64, _secp_ctx: &Secp256k1<T>,
+	) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+		unreachable!()
+	}
+}
+
+impl MessageRouter for FuzzRouter {
+	fn find_path(
+		&self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination,
+	) -> Result<OnionMessagePath, ()> {
+		unreachable!()
+	}
+
+	fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
+		&self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
+	) -> Result<Vec<BlindedPath>, ()> {
+		unreachable!()
+	}
 }
 
 pub struct TestBroadcaster {}
 impl BroadcasterInterface for TestBroadcaster {
-	fn broadcast_transactions(&self, _txs: &[&Transaction]) { }
+	fn broadcast_transactions(&self, _txs: &[&Transaction]) {}
 }
 
 pub struct VecWriter(pub Vec<u8>);
@@ -114,55 +157,130 @@ impl Writer for VecWriter {
 	}
 }
 
+/// The LDK API requires that any time we tell it we're done persisting a `ChannelMonitor[Update]`
+/// we never pass it in as the "latest" `ChannelMonitor` on startup. However, we can pass
+/// out-of-date monitors as long as we never told LDK we finished persisting them, which we do by
+/// storing both old `ChannelMonitor`s and ones that are "being persisted" here.
+///
+/// Note that such "being persisted" `ChannelMonitor`s are stored in `ChannelManager` and will
+/// simply be replayed on startup.
+struct LatestMonitorState {
+	/// The latest monitor id which we told LDK we've persisted
+	persisted_monitor_id: u64,
+	/// The latest serialized `ChannelMonitor` that we told LDK we persisted.
+	persisted_monitor: Vec<u8>,
+	/// A set of (monitor id, serialized `ChannelMonitor`)s which we're currently "persisting",
+	/// from LDK's perspective.
+	pending_monitors: Vec<(u64, Vec<u8>)>,
+}
+
 struct TestChainMonitor {
 	pub logger: Arc<dyn Logger>,
 	pub keys: Arc<KeyProvider>,
 	pub persister: Arc<TestPersister>,
-	pub chain_monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
-	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
-	// logic will automatically force-close our channels for us (as we don't have an up-to-date
-	// monitor implying we are not able to punish misbehaving counterparties). Because this test
-	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
-	// fully-serialized monitor state here, as well as the corresponding update_id.
-	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
+	pub chain_monitor: Arc<
+		chainmonitor::ChainMonitor<
+			TestChannelSigner,
+			Arc<dyn chain::Filter>,
+			Arc<TestBroadcaster>,
+			Arc<FuzzEstimator>,
+			Arc<dyn Logger>,
+			Arc<TestPersister>,
+		>,
+	>,
+	pub latest_monitors: Mutex<HashMap<OutPoint, LatestMonitorState>>,
 }
 
 impl TestChainMonitor {
-	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
+	pub fn new(
+		broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>,
+		persister: Arc<TestPersister>, keys: Arc<KeyProvider>,
+	) -> Self {
 		Self {
-			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))),
+			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(
+				None,
+				broadcaster,
+				logger.clone(),
+				feeest,
+				Arc::clone(&persister),
+			)),
 			logger,
 			keys,
 			persister,
-			latest_monitors: Mutex::new(HashMap::new()),
+			latest_monitors: Mutex::new(new_hash_map()),
 		}
 	}
 }
 impl chain::Watch<TestChannelSigner> for TestChainMonitor {
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
+	fn watch_channel(
+		&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>,
+	) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
 		let mut ser = VecWriter(Vec::new());
 		monitor.write(&mut ser).unwrap();
-		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
+		let monitor_id = monitor.get_latest_update_id();
+		let res = self.chain_monitor.watch_channel(funding_txo, monitor);
+		let state = match res {
+			Ok(chain::ChannelMonitorUpdateStatus::Completed) => LatestMonitorState {
+				persisted_monitor_id: monitor_id,
+				persisted_monitor: ser.0,
+				pending_monitors: Vec::new(),
+			},
+			Ok(chain::ChannelMonitorUpdateStatus::InProgress) => {
+				panic!("The test currently doesn't test initial-persistence via the async pipeline")
+			},
+			Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(),
+			Err(()) => panic!(),
+		};
+		if self.latest_monitors.lock().unwrap().insert(funding_txo, state).is_some() {
			panic!("Already had monitor pre-watch_channel");
 		}
-
self.chain_monitor.watch_channel(funding_txo, monitor) + res } - fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus { + fn update_channel( + &self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate, + ) -> chain::ChannelMonitorUpdateStatus { let mut map_lock = self.latest_monitors.lock().unwrap(); - let mut map_entry = match map_lock.entry(funding_txo) { - hash_map::Entry::Occupied(entry) => entry, - hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"), - }; - let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>:: - read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1; - deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap(); + let map_entry = map_lock.get_mut(&funding_txo).expect("Didn't have monitor on update call"); + let latest_monitor_data = map_entry + .pending_monitors + .last() + .as_ref() + .map(|(_, data)| data) + .unwrap_or(&map_entry.persisted_monitor); + let deserialized_monitor = + <(BlockHash, channelmonitor::ChannelMonitor)>::read( + &mut Cursor::new(&latest_monitor_data), + (&*self.keys, &*self.keys), + ) + .unwrap() + .1; + deserialized_monitor + .update_monitor( + update, + &&TestBroadcaster {}, + &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, + &self.logger, + ) + .unwrap(); let mut ser = VecWriter(Vec::new()); deserialized_monitor.write(&mut ser).unwrap(); - map_entry.insert((update.update_id, ser.0)); - self.chain_monitor.update_channel(funding_txo, update) + let res = self.chain_monitor.update_channel(funding_txo, update); + match res { + chain::ChannelMonitorUpdateStatus::Completed => { + map_entry.persisted_monitor_id = update.update_id; + map_entry.persisted_monitor = ser.0; + }, + chain::ChannelMonitorUpdateStatus::InProgress => { + map_entry.pending_monitors.push((update.update_id, ser.0)); + }, + chain::ChannelMonitorUpdateStatus::UnrecoverableError => panic!(), + } + res } - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec, Option)> { + fn release_pending_monitor_events( + &self, + ) -> Vec<(OutPoint, ChannelId, Vec, Option)> { return self.chain_monitor.release_pending_monitor_events(); } } @@ -170,14 +288,15 @@ impl chain::Watch for TestChainMonitor { struct KeyProvider { node_secret: SecretKey, rand_bytes_id: atomic::AtomicU32, - enforcement_states: Mutex>>>, + enforcement_states: Mutex>>>, } impl EntropySource for KeyProvider { fn get_secure_random_bytes(&self) -> [u8; 32] { let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed); + #[rustfmt::skip] let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_secret[31]]; - res[30-4..30].copy_from_slice(&id.to_le_bytes()); + res[30 - 4..30].copy_from_slice(&id.to_le_bytes()); res } } @@ -186,15 +305,17 @@ impl NodeSigner for KeyProvider { fn get_node_id(&self, recipient: Recipient) -> Result { let node_secret = match recipient { Recipient::Node => Ok(&self.node_secret), - Recipient::PhantomNode => Err(()) + Recipient::PhantomNode => Err(()), }?; Ok(PublicKey::from_secret_key(&Secp256k1::signing_only(), node_secret)) } - fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result { + fn ecdh( + &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>, + ) -> Result { let mut node_secret = match recipient { 
Recipient::Node => Ok(self.node_secret.clone()), - Recipient::PhantomNode => Err(()) + Recipient::PhantomNode => Err(()), }?; if let Some(tweak) = tweak { node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?; @@ -203,15 +324,19 @@ impl NodeSigner for KeyProvider { } fn get_inbound_payment_key_material(&self) -> KeyMaterial { - KeyMaterial([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_secret[31]]) + #[rustfmt::skip] + let random_bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_secret[31]]; + KeyMaterial(random_bytes) } - fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result { + fn sign_invoice( + &self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient, + ) -> Result { unreachable!() } fn sign_bolt12_invoice_request( - &self, _invoice_request: &UnsignedInvoiceRequest + &self, _invoice_request: &UnsignedInvoiceRequest, ) -> Result { unreachable!() } @@ -222,24 +347,33 @@ impl NodeSigner for KeyProvider { unreachable!() } - fn sign_gossip_message(&self, msg: lightning::ln::msgs::UnsignedGossipMessage) -> Result { - let msg_hash = Message::from_slice(&Sha256dHash::hash(&msg.encode()[..])[..]).map_err(|_| ())?; + fn sign_gossip_message( + &self, msg: lightning::ln::msgs::UnsignedGossipMessage, + ) -> Result { + let msg_hash = Message::from_digest(Sha256dHash::hash(&msg.encode()[..]).to_byte_array()); let secp_ctx = Secp256k1::signing_only(); Ok(secp_ctx.sign_ecdsa(&msg_hash, &self.node_secret)) } } impl SignerProvider for KeyProvider { - type Signer = TestChannelSigner; + type EcdsaSigner = TestChannelSigner; + #[cfg(taproot)] + type TaprootSigner = TestChannelSigner; - fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { + fn generate_channel_keys_id( + &self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128, + ) -> [u8; 32] { let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8; [id; 32] } - fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::Signer { + fn derive_channel_signer( + &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32], + ) -> Self::EcdsaSigner { let secp_ctx = Secp256k1::signing_only(); let id = channel_keys_id[0]; + #[rustfmt::skip] let keys = InMemorySigner::new( &secp_ctx, SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_secret[31]]).unwrap(), @@ -256,40 +390,46 @@ impl SignerProvider for KeyProvider { TestChannelSigner::new_with_revoked(keys, revoked_commitment, false) } - fn read_chan_signer(&self, buffer: &[u8]) -> Result { + fn read_chan_signer(&self, buffer: &[u8]) -> Result { let mut reader = std::io::Cursor::new(buffer); let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?; let state = self.make_enforcement_state_cell(inner.commitment_seed); - Ok(TestChannelSigner { - inner, - state, - disable_revocation_policy_check: false, - available: Arc::new(Mutex::new(true)), - }) + Ok(TestChannelSigner::new_with_revoked(inner, state, false)) } - fn get_destination_script(&self) -> Result { + fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { let secp_ctx = Secp256k1::signing_only(); + #[rustfmt::skip] let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 2, self.node_secret[31]]).unwrap(); - let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize()); - Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(our_channel_monitor_claim_key_hash).into_script()) + let our_channel_monitor_claim_key_hash = WPubkeyHash::hash( + &PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize(), + ); + Ok(Builder::new() + .push_opcode(opcodes::all::OP_PUSHBYTES_0) + .push_slice(our_channel_monitor_claim_key_hash) + .into_script()) } fn get_shutdown_scriptpubkey(&self) -> Result { let secp_ctx = Secp256k1::signing_only(); + #[rustfmt::skip] let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_secret[31]]).unwrap(); - let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize()); + let pubkey_hash = + WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize()); Ok(ShutdownScript::new_p2wpkh(&pubkey_hash)) } } impl KeyProvider { - fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc> { + fn make_enforcement_state_cell( + &self, commitment_seed: [u8; 32], + ) -> Arc> { let mut revoked_commitments = self.enforcement_states.lock().unwrap(); if !revoked_commitments.contains_key(&commitment_seed) { - revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new()))); + revoked_commitments + .insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new()))); } let cell = revoked_commitments.get(&commitment_seed).unwrap(); Arc::clone(cell) @@ -317,34 +457,61 @@ fn check_api_err(api_err: APIError, sendable_bounds_violated: bool) { APIError::MonitorUpdateInProgress => { // We can (obviously) temp-fail a monitor update }, - APIError::IncompatibleShutdownScript { .. } => panic!("Cannot send an incompatible shutdown script"), + APIError::IncompatibleShutdownScript { .. } => { + panic!("Cannot send an incompatible shutdown script") + }, } } #[inline] fn check_payment_err(send_err: PaymentSendFailure, sendable_bounds_violated: bool) { match send_err { - PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err, sendable_bounds_violated), + PaymentSendFailure::ParameterError(api_err) => { + check_api_err(api_err, sendable_bounds_violated) + }, PaymentSendFailure::PathParameterError(per_path_results) => { - for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } } + for res in per_path_results { + if let Err(api_err) = res { + check_api_err(api_err, sendable_bounds_violated); + } + } }, PaymentSendFailure::AllFailedResendSafe(per_path_results) => { - for api_err in per_path_results { check_api_err(api_err, sendable_bounds_violated); } + for api_err in per_path_results { + check_api_err(api_err, sendable_bounds_violated); + } }, PaymentSendFailure::PartialFailure { results, .. 
} => { - for res in results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } } + for res in results { + if let Err(api_err) = res { + check_api_err(api_err, sendable_bounds_violated); + } + } }, PaymentSendFailure::DuplicatePayment => panic!(), } } -type ChanMan<'a> = ChannelManager, Arc, Arc, Arc, Arc, Arc, &'a FuzzRouter, Arc>; +type ChanMan<'a> = ChannelManager< + Arc, + Arc, + Arc, + Arc, + Arc, + Arc, + &'a FuzzRouter, + Arc, +>; #[inline] -fn get_payment_secret_hash(dest: &ChanMan, payment_id: &mut u8) -> Option<(PaymentSecret, PaymentHash)> { +fn get_payment_secret_hash( + dest: &ChanMan, payment_id: &mut u8, +) -> Option<(PaymentSecret, PaymentHash)> { let mut payment_hash; for _ in 0..256 { payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).to_byte_array()); - if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) { + if let Ok(payment_secret) = + dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) + { return Some((payment_secret, payment_hash)); } *payment_id = payment_id.wrapping_add(1); @@ -353,29 +520,53 @@ fn get_payment_secret_hash(dest: &ChanMan, payment_id: &mut u8) -> Option<(Payme } #[inline] -fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool { +fn send_noret( + source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, + payment_idx: &mut u64, +) { + send_payment(source, dest, dest_chan_id, amt, payment_id, payment_idx); +} + +#[inline] +fn send_payment( + source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, + payment_idx: &mut u64, +) -> bool { let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; }; + if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { + (secret, hash) + } else { + return true; + }; let mut payment_id = [0; 32]; payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); *payment_idx += 1; - let (min_value_sendable, max_value_sendable) = source.list_usable_channels() - .iter().find(|chan| chan.short_channel_id == Some(dest_chan_id)) - .map(|chan| - (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) + let (min_value_sendable, max_value_sendable) = source + .list_usable_channels() + .iter() + .find(|chan| chan.short_channel_id == Some(dest_chan_id)) + .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) .unwrap_or((0, 0)); - if let Err(err) = source.send_payment_with_route(&Route { - paths: vec![Path { hops: vec![RouteHop { - pubkey: dest.get_our_node_id(), - node_features: dest.node_features(), - short_channel_id: dest_chan_id, - channel_features: dest.channel_features(), - fee_msat: amt, - cltv_expiry_delta: 200, - maybe_announced_channel: true, - }], blinded_tail: None }], - route_params: None, - }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) { + if let Err(err) = source.send_payment_with_route( + &Route { + paths: vec![Path { + hops: vec![RouteHop { + pubkey: dest.get_our_node_id(), + node_features: dest.node_features(), + short_channel_id: dest_chan_id, + channel_features: dest.channel_features(), + fee_msat: amt, + cltv_expiry_delta: 200, + maybe_announced_channel: true, + }], + blinded_tail: None, + }], + route_params: None, + }, + payment_hash, + 
RecipientOnionFields::secret_only(payment_secret), + PaymentId(payment_id), + ) { check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable); false } else { @@ -386,39 +577,76 @@ fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, p true } } + +#[inline] +fn send_hop_noret( + source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, + amt: u64, payment_id: &mut u8, payment_idx: &mut u64, +) { + send_hop_payment( + source, + middle, + middle_chan_id, + dest, + dest_chan_id, + amt, + payment_id, + payment_idx, + ); +} + #[inline] -fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool { +fn send_hop_payment( + source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, + amt: u64, payment_id: &mut u8, payment_idx: &mut u64, +) -> bool { let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; }; + if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { + (secret, hash) + } else { + return true; + }; let mut payment_id = [0; 32]; payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); *payment_idx += 1; - let (min_value_sendable, max_value_sendable) = source.list_usable_channels() - .iter().find(|chan| chan.short_channel_id == Some(middle_chan_id)) - .map(|chan| - (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) + let (min_value_sendable, max_value_sendable) = source + .list_usable_channels() + .iter() + .find(|chan| chan.short_channel_id == Some(middle_chan_id)) + .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) .unwrap_or((0, 0)); let first_hop_fee = 50_000; - if let Err(err) = source.send_payment_with_route(&Route { - paths: vec![Path { hops: vec![RouteHop { - pubkey: middle.get_our_node_id(), - node_features: middle.node_features(), - short_channel_id: middle_chan_id, - channel_features: middle.channel_features(), - fee_msat: first_hop_fee, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: dest.get_our_node_id(), - node_features: dest.node_features(), - short_channel_id: dest_chan_id, - channel_features: dest.channel_features(), - fee_msat: amt, - cltv_expiry_delta: 200, - maybe_announced_channel: true, - }], blinded_tail: None }], - route_params: None, - }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) { + if let Err(err) = source.send_payment_with_route( + &Route { + paths: vec![Path { + hops: vec![ + RouteHop { + pubkey: middle.get_our_node_id(), + node_features: middle.node_features(), + short_channel_id: middle_chan_id, + channel_features: middle.channel_features(), + fee_msat: first_hop_fee, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: dest.get_our_node_id(), + node_features: dest.node_features(), + short_channel_id: dest_chan_id, + channel_features: dest.channel_features(), + fee_msat: amt, + cltv_expiry_delta: 200, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }], + route_params: None, + }, + payment_hash, + RecipientOnionFields::secret_only(payment_secret), + PaymentId(payment_id), + ) { let sent_amt = amt + first_hop_fee; check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable); false @@ -434,18 +662,32 @@ 
fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des #[inline] pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); - let broadcast = Arc::new(TestBroadcaster{}); + let broadcast = Arc::new(TestBroadcaster {}); let router = FuzzRouter {}; macro_rules! make_node { - ($node_id: expr, $fee_estimator: expr) => { { - let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let node_secret = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, $node_id]).unwrap(); - let keys_manager = Arc::new(KeyProvider { node_secret, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) }); - let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), + ($node_id: expr, $fee_estimator: expr) => {{ + let logger: Arc = + Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); + let node_secret = SecretKey::from_slice(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, $node_id, + ]) + .unwrap(); + let keys_manager = Arc::new(KeyProvider { + node_secret, + rand_bytes_id: atomic::AtomicU32::new(0), + enforcement_states: Mutex::new(new_hash_map()), + }); + let monitor = Arc::new(TestChainMonitor::new( + broadcast.clone(), + logger.clone(), + $fee_estimator.clone(), Arc::new(TestPersister { - update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) - }), Arc::clone(&keys_manager))); + update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed), + }), + Arc::clone(&keys_manager), + )); let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -456,23 +698,41 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; - let params = ChainParameters { - network, - best_block: BestBlock::from_network(network), - }; - (ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), &router, Arc::clone(&logger), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), config, params, best_block_timestamp), - monitor, keys_manager) - } } + let params = ChainParameters { network, best_block: BestBlock::from_network(network) }; + ( + ChannelManager::new( + $fee_estimator.clone(), + monitor.clone(), + broadcast.clone(), + &router, + Arc::clone(&logger), + keys_manager.clone(), + keys_manager.clone(), + keys_manager.clone(), + config, + params, + best_block_timestamp, + ), + monitor, + keys_manager, + ) + }}; } macro_rules! 
reload_node { - ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { { - let keys_manager = Arc::clone(& $keys_manager); - let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), + ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => {{ + let keys_manager = Arc::clone(&$keys_manager); + let logger: Arc = + Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); + let chain_monitor = Arc::new(TestChainMonitor::new( + broadcast.clone(), + logger.clone(), + $fee_estimator.clone(), Arc::new(TestPersister { - update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) - }), Arc::clone(& $keys_manager))); + update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed), + }), + Arc::clone(&$keys_manager), + )); let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -482,13 +742,25 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { config.manually_accept_inbound_channels = true; } - let mut monitors = HashMap::new(); + let mut monitors = new_hash_map(); let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap(); - for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() { - monitors.insert(outpoint, <(BlockHash, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1); - chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser)); + for (outpoint, mut prev_state) in old_monitors.drain() { + monitors.insert( + outpoint, + <(BlockHash, ChannelMonitor)>::read( + &mut Cursor::new(&prev_state.persisted_monitor), + (&*$keys_manager, &*$keys_manager), + ) + .expect("Failed to read monitor") + .1, + ); + // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, + // considering them discarded. LDK should replay these for us as they're stored in + // the `ChannelManager`. + prev_state.pending_monitors.clear(); + chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, prev_state); } - let mut monitor_refs = HashMap::new(); + let mut monitor_refs = new_hash_map(); for (outpoint, monitor) in monitors.iter_mut() { monitor_refs.insert(*outpoint, monitor); } @@ -506,24 +778,37 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { channel_monitors: monitor_refs, }; - let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone()); + let res = ( + <(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args) + .expect("Failed to read manager") + .1, + chain_monitor.clone(), + ); for (funding_txo, mon) in monitors.drain() { - assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon), - Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!( + chain_monitor.chain_monitor.watch_channel(funding_txo, mon), + Ok(ChannelMonitorUpdateStatus::Completed) + ); } res - } } + }}; } let mut channel_txn = Vec::new(); macro_rules! 
make_channel { - ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => { { - $source.peer_connected(&$dest.get_our_node_id(), &Init { - features: $dest.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - $dest.peer_connected(&$source.get_our_node_id(), &Init { - features: $source.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ + let init_dest = Init { + features: $dest.init_features(), + networks: None, + remote_network_address: None, + }; + $source.peer_connected(&$dest.get_our_node_id(), &init_dest, true).unwrap(); + let init_src = Init { + features: $source.init_features(), + networks: None, + remote_network_address: None, + }; + $dest.peer_connected(&$source.get_our_node_id(), &init_src, false).unwrap(); $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); let open_channel = { @@ -531,7 +816,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { assert_eq!(events.len(), 1); if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { msg.clone() - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } }; $dest.handle_open_channel(&$source.get_our_node_id(), &open_channel); @@ -540,23 +827,33 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let events = $dest.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::OpenChannelRequest { - ref temporary_channel_id, ref counterparty_node_id, .. - } = events[0] { + ref temporary_channel_id, + ref counterparty_node_id, + .. + } = events[0] + { let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); + random_bytes + .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); - $dest.accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - ).unwrap(); - } else { panic!("Wrong event type"); } + $dest + .accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + ) + .unwrap(); + } else { + panic!("Wrong event type"); + } } let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] { msg.clone() - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } }; $source.handle_accept_channel(&$dest.get_our_node_id(), &accept_channel); @@ -564,14 +861,34 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { { let events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] { - let tx = Transaction { version: $chan_id, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { - value: *channel_value_satoshis, script_pubkey: output_script.clone(), - }]}; + if let events::Event::FundingGenerationReady { + ref temporary_channel_id, + ref channel_value_satoshis, + ref output_script, + .. 
+ } = events[0] + { + let tx = Transaction { + version: Version($chan_id), + lock_time: LockTime::ZERO, + input: Vec::new(), + output: vec![TxOut { + value: Amount::from_sat(*channel_value_satoshis), + script_pubkey: output_script.clone(), + }], + }; funding_output = OutPoint { txid: tx.txid(), index: 0 }; - $source.funding_transaction_generated(&temporary_channel_id, &$dest.get_our_node_id(), tx.clone()).unwrap(); + $source + .funding_transaction_generated( + &temporary_channel_id, + &$dest.get_our_node_id(), + tx.clone(), + ) + .unwrap(); channel_txn.push(tx); - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } } let funding_created = { @@ -579,7 +896,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { assert_eq!(events.len(), 1); if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] { msg.clone() - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } }; $dest.handle_funding_created(&$source.get_our_node_id(), &funding_created); @@ -588,53 +907,64 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { assert_eq!(events.len(), 1); if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { msg.clone() - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } }; let events = $dest.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - if let events::Event::ChannelPending{ ref counterparty_node_id, .. } = events[0] { + if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { assert_eq!(counterparty_node_id, &$source.get_our_node_id()); - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } $source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed); let events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - if let events::Event::ChannelPending{ ref counterparty_node_id, .. } = events[0] { + if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { assert_eq!(counterparty_node_id, &$dest.get_our_node_id()); - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } funding_output - } } + }}; } macro_rules! confirm_txn { - ($node: expr) => { { + ($node: expr) => {{ let chain_hash = genesis_block(Network::Bitcoin).block_hash(); let mut header = create_dummy_header(chain_hash, 42); - let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + let txdata: Vec<_> = + channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); $node.transactions_confirmed(&header, &txdata, 1); for _ in 2..100 { header = create_dummy_header(header.block_hash(), 42); } $node.best_block_updated(&header, 99); - } } + }}; } macro_rules! 
lock_fundings { - ($nodes: expr) => { { + ($nodes: expr) => {{ let mut node_events = Vec::new(); for node in $nodes.iter() { node_events.push(node.get_and_clear_pending_msg_events()); } for (idx, node_event) in node_events.iter().enumerate() { for event in node_event { - if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { + if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = + event + { for node in $nodes.iter() { if node.get_our_node_id() == *node_id { node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg); } } - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } } } @@ -642,18 +972,20 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let events = node.get_and_clear_pending_msg_events(); for event in events { if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event { - } else { panic!("Wrong event type"); } + } else { + panic!("Wrong event type"); + } } } - } } + }}; } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_a = 253; + let mut last_htlc_clear_fee_a = 253; let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_b = 253; + let mut last_htlc_clear_fee_b = 253; let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_c = 253; + let mut last_htlc_clear_fee_c = 253; // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest // forwarding. @@ -675,8 +1007,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap(); let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap(); - let mut payment_id: u8 = 0; - let mut payment_idx: u64 = 0; + let mut p_id: u8 = 0; + let mut p_idx: u64 = 0; let mut chan_a_disconnected = false; let mut chan_b_disconnected = false; @@ -693,26 +1025,24 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { nodes[2].write(&mut node_c_ser).unwrap(); macro_rules! test_return { - () => { { + () => {{ assert_eq!(nodes[0].list_channels().len(), 1); assert_eq!(nodes[1].list_channels().len(), 2); assert_eq!(nodes[2].list_channels().len(), 1); return; - } } + }}; } let mut read_pos = 0; macro_rules! get_slice { - ($len: expr) => { - { - let slice_len = $len as usize; - if data.len() < read_pos + slice_len { - test_return!(); - } - read_pos += slice_len; - &data[read_pos - slice_len..read_pos] + ($len: expr) => {{ + let slice_len = $len as usize; + if data.len() < read_pos + slice_len { + test_return!(); } - } + read_pos += slice_len; + &data[read_pos - slice_len..read_pos] + }}; } loop { @@ -902,8 +1232,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } } + macro_rules! process_msg_noret { + ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ + process_msg_events!($node, $corrupt_forward, $limit_events); + }}; + } + macro_rules! drain_msg_events_on_disconnect { - ($counterparty_id: expr) => { { + ($counterparty_id: expr) => {{ if $counterparty_id == 0 { for event in nodes[0].get_and_clear_pending_msg_events() { match event { @@ -915,14 +1251,19 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set! 
}, - _ => if out.may_fail.load(atomic::Ordering::Acquire) { - return; - } else { - panic!("Unhandled message event") + _ => { + if out.may_fail.load(atomic::Ordering::Acquire) { + return; + } else { + panic!("Unhandled message event") + } }, } } - push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0)); + push_excess_b_events!( + nodes[1].get_and_clear_pending_msg_events().drain(..), + Some(0) + ); ab_events.clear(); ba_events.clear(); } else { @@ -936,26 +1277,31 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set! }, - _ => if out.may_fail.load(atomic::Ordering::Acquire) { - return; - } else { - panic!("Unhandled message event") + _ => { + if out.may_fail.load(atomic::Ordering::Acquire) { + return; + } else { + panic!("Unhandled message event") + } }, } } - push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2)); + push_excess_b_events!( + nodes[1].get_and_clear_pending_msg_events().drain(..), + Some(2) + ); bc_events.clear(); cb_events.clear(); } - } } + }}; } macro_rules! process_events { - ($node: expr, $fail: expr) => { { + ($node: expr, $fail: expr) => {{ // In case we get 256 payments we may have a hash collision, resulting in the // second claim/fail call not finding the duplicate-hash HTLC, so we have to // deduplicate the calls here. - let mut claim_set = HashSet::new(); + let mut claim_set = new_hash_map(); let mut events = nodes[$node].get_and_clear_pending_events(); // Sort events so that PendingHTLCsForwardable get processed last. This avoids a // case where we first process a PendingHTLCsForwardable, then claim/fail on a @@ -966,18 +1312,24 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { if let events::Event::PaymentClaimable { .. } = a { if let events::Event::PendingHTLCsForwardable { .. } = b { Ordering::Less - } else { Ordering::Equal } + } else { + Ordering::Equal + } } else if let events::Event::PendingHTLCsForwardable { .. } = a { if let events::Event::PaymentClaimable { .. } = b { Ordering::Greater - } else { Ordering::Equal } - } else { Ordering::Equal } + } else { + Ordering::Equal + } + } else { + Ordering::Equal + } }); let had_events = !events.is_empty(); for event in events.drain(..) { match event { events::Event::PaymentClaimable { payment_hash, .. } => { - if claim_set.insert(payment_hash.0) { + if claim_set.insert(payment_hash.0, ()).is_none() { if $fail { nodes[$node].fail_htlc_backwards(&payment_hash); } else { @@ -990,7 +1342,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, events::Event::PaymentFailed { .. } => {}, - events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. } => { + events::Event::ProbeSuccessful { .. } + | events::Event::ProbeFailed { .. } => { // Even though we don't explicitly send probes, because probes are // detected based on hashing the payment hash+preimage, its rather // trivial for the fuzzer to build payments that accidentally end up @@ -1002,55 +1355,97 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { nodes[$node].process_pending_htlc_forwards(); }, events::Event::HTLCHandlingFailed { .. 
} => {}, - _ => if out.may_fail.load(atomic::Ordering::Acquire) { - return; - } else { - panic!("Unhandled event") + _ => { + if out.may_fail.load(atomic::Ordering::Acquire) { + return; + } else { + panic!("Unhandled event") + } }, } } had_events - } } + }}; + } + + macro_rules! process_ev_noret { + ($node: expr, $fail: expr) => {{ + process_events!($node, $fail); + }}; } + let complete_first = |v: &mut Vec<_>| if !v.is_empty() { Some(v.remove(0)) } else { None }; + let complete_second = |v: &mut Vec<_>| if v.len() > 1 { Some(v.remove(1)) } else { None }; + let complete_monitor_update = + |monitor: &Arc, + chan_funding, + compl_selector: &dyn Fn(&mut Vec<(u64, Vec)>) -> Option<(u64, Vec)>| { + if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + if let Some((id, data)) = compl_selector(&mut state.pending_monitors) { + monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + }; + + let complete_all_monitor_updates = |monitor: &Arc, chan_funding| { + if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + for (id, data) in state.pending_monitors.drain(..) { + monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + }; + let v = get_slice!(1)[0]; out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { // In general, we keep related message groups close together in binary form, allowing // bit-twiddling mutations to have similar effects. This is probably overkill, but no // harm in doing so. 
- - 0x00 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress, - 0x01 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress, - 0x02 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress, - 0x04 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed, - 0x05 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed, - 0x06 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed, - - 0x08 => { - if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) { - monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id); - nodes[0].process_monitor_events(); - } + 0x00 => { + *monitor_a.persister.update_ret.lock().unwrap() = + ChannelMonitorUpdateStatus::InProgress }, - 0x09 => { - if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) { - monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id); - nodes[1].process_monitor_events(); - } + 0x01 => { + *monitor_b.persister.update_ret.lock().unwrap() = + ChannelMonitorUpdateStatus::InProgress }, - 0x0a => { - if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) { - monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id); - nodes[1].process_monitor_events(); - } + 0x02 => { + *monitor_c.persister.update_ret.lock().unwrap() = + ChannelMonitorUpdateStatus::InProgress }, - 0x0b => { - if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) { - monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id); - nodes[2].process_monitor_events(); - } + 0x04 => { + *monitor_a.persister.update_ret.lock().unwrap() = + ChannelMonitorUpdateStatus::Completed + }, + 0x05 => { + *monitor_b.persister.update_ret.lock().unwrap() = + ChannelMonitorUpdateStatus::Completed }, + 0x06 => { + *monitor_c.persister.update_ret.lock().unwrap() = + ChannelMonitorUpdateStatus::Completed + }, + + 0x08 => complete_all_monitor_updates(&monitor_a, &chan_1_funding), + 0x09 => complete_all_monitor_updates(&monitor_b, &chan_1_funding), + 0x0a => complete_all_monitor_updates(&monitor_b, &chan_2_funding), + 0x0b => complete_all_monitor_updates(&monitor_c, &chan_2_funding), 0x0c => { if !chan_a_disconnected { @@ -1070,66 +1465,82 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0x0e => { if chan_a_disconnected { - nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { - features: nodes[1].init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { - features: nodes[0].init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + let init_1 = Init { + features: nodes[1].init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap(); + let init_0 = Init { + features: nodes[0].init_features(), + networks: None, + remote_network_address: None, + }; + nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap(); chan_a_disconnected = false; } }, 0x0f => { if chan_b_disconnected { - nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { - features: nodes[2].init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - 
nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init {
-						features: nodes[1].init_features(), networks: None, remote_network_address: None
-					}, false).unwrap();
+					let init_2 = Init {
+						features: nodes[2].init_features(),
+						networks: None,
+						remote_network_address: None,
+					};
+					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
+					let init_1 = Init {
+						features: nodes[1].init_features(),
+						networks: None,
+						remote_network_address: None,
+					};
+					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
 					chan_b_disconnected = false;
 				}
 			},
 
-			0x10 => { process_msg_events!(0, true, ProcessMessages::AllMessages); },
-			0x11 => { process_msg_events!(0, false, ProcessMessages::AllMessages); },
-			0x12 => { process_msg_events!(0, true, ProcessMessages::OneMessage); },
-			0x13 => { process_msg_events!(0, false, ProcessMessages::OneMessage); },
-			0x14 => { process_msg_events!(0, true, ProcessMessages::OnePendingMessage); },
-			0x15 => { process_msg_events!(0, false, ProcessMessages::OnePendingMessage); },
+			0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages),
+			0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages),
+			0x12 => process_msg_noret!(0, true, ProcessMessages::OneMessage),
+			0x13 => process_msg_noret!(0, false, ProcessMessages::OneMessage),
+			0x14 => process_msg_noret!(0, true, ProcessMessages::OnePendingMessage),
+			0x15 => process_msg_noret!(0, false, ProcessMessages::OnePendingMessage),
 
-			0x16 => { process_events!(0, true); },
-			0x17 => { process_events!(0, false); },
+			0x16 => process_ev_noret!(0, true),
+			0x17 => process_ev_noret!(0, false),
 
-			0x18 => { process_msg_events!(1, true, ProcessMessages::AllMessages); },
-			0x19 => { process_msg_events!(1, false, ProcessMessages::AllMessages); },
-			0x1a => { process_msg_events!(1, true, ProcessMessages::OneMessage); },
-			0x1b => { process_msg_events!(1, false, ProcessMessages::OneMessage); },
-			0x1c => { process_msg_events!(1, true, ProcessMessages::OnePendingMessage); },
-			0x1d => { process_msg_events!(1, false, ProcessMessages::OnePendingMessage); },
+			0x18 => process_msg_noret!(1, true, ProcessMessages::AllMessages),
+			0x19 => process_msg_noret!(1, false, ProcessMessages::AllMessages),
+			0x1a => process_msg_noret!(1, true, ProcessMessages::OneMessage),
+			0x1b => process_msg_noret!(1, false, ProcessMessages::OneMessage),
+			0x1c => process_msg_noret!(1, true, ProcessMessages::OnePendingMessage),
+			0x1d => process_msg_noret!(1, false, ProcessMessages::OnePendingMessage),
 
-			0x1e => { process_events!(1, true); },
-			0x1f => { process_events!(1, false); },
+			0x1e => process_ev_noret!(1, true),
+			0x1f => process_ev_noret!(1, false),
 
-			0x20 => { process_msg_events!(2, true, ProcessMessages::AllMessages); },
-			0x21 => { process_msg_events!(2, false, ProcessMessages::AllMessages); },
-			0x22 => { process_msg_events!(2, true, ProcessMessages::OneMessage); },
-			0x23 => { process_msg_events!(2, false, ProcessMessages::OneMessage); },
-			0x24 => { process_msg_events!(2, true, ProcessMessages::OnePendingMessage); },
-			0x25 => { process_msg_events!(2, false, ProcessMessages::OnePendingMessage); },
+			0x20 => process_msg_noret!(2, true, ProcessMessages::AllMessages),
+			0x21 => process_msg_noret!(2, false, ProcessMessages::AllMessages),
+			0x22 => process_msg_noret!(2, true, ProcessMessages::OneMessage),
+			0x23 => process_msg_noret!(2, false, ProcessMessages::OneMessage),
+			0x24 => process_msg_noret!(2, true, ProcessMessages::OnePendingMessage),
+			0x25 => process_msg_noret!(2, false, ProcessMessages::OnePendingMessage),
 
-			0x26 => { process_events!(2, true); },
-			0x27 => { process_events!(2, false); },
+			0x26 => process_ev_noret!(2, true),
+			0x27 => process_ev_noret!(2, false),
 
 			0x2c => {
 				if !chan_a_disconnected {
 					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
-					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+					push_excess_b_events!(
+						nodes[1].get_and_clear_pending_msg_events().drain(..),
+						Some(0)
+					);
 					ab_events.clear();
 					ba_events.clear();
 				}
-				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
+				let (new_node_a, new_monitor_a) =
+					reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
 				nodes[0] = new_node_a;
 				monitor_a = new_monitor_a;
 			},
@@ -1148,7 +1559,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 					bc_events.clear();
 					cb_events.clear();
 				}
-				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
+				let (new_node_b, new_monitor_b) =
+					reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
 				nodes[1] = new_node_b;
 				monitor_b = new_monitor_b;
 			},
@@ -1156,71 +1568,107 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 				if !chan_b_disconnected {
 					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
 					chan_b_disconnected = true;
-					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+					push_excess_b_events!(
+						nodes[1].get_and_clear_pending_msg_events().drain(..),
+						Some(2)
+					);
 					bc_events.clear();
 					cb_events.clear();
 				}
-				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
+				let (new_node_c, new_monitor_c) =
+					reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
 				nodes[2] = new_node_c;
 				monitor_c = new_monitor_c;
 			},
 
 			// 1/10th the channel size:
-			0x30 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id, &mut payment_idx); },
-			0x31 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx); },
-			0x32 => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx); },
-			0x33 => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id, &mut payment_idx); },
-			0x34 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx); },
-			0x35 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx); },
-
-			0x38 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut payment_id, &mut payment_idx); },
-			0x39 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut payment_id, &mut payment_idx); },
-			0x3a => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut payment_id, &mut payment_idx); },
-			0x3b => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut payment_id, &mut payment_idx); },
-			0x3c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut payment_id, &mut payment_idx); },
-			0x3d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut payment_id, &mut payment_idx); },
-
-			0x40 => { send_payment(&nodes[0], &nodes[1], chan_a, 100_000, &mut payment_id, &mut payment_idx); },
-			0x41 => { send_payment(&nodes[1], &nodes[0], chan_a, 100_000, &mut payment_id, &mut payment_idx); },
-			0x42 => { send_payment(&nodes[1], &nodes[2], chan_b, 100_000, &mut payment_id, &mut payment_idx); },
-			0x43 => { send_payment(&nodes[2], &nodes[1], chan_b, 100_000, &mut payment_id, &mut payment_idx); },
-			0x44 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut payment_id, &mut payment_idx); },
-			0x45 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut payment_id, &mut payment_idx); },
-
-			0x48 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000, &mut payment_id, &mut payment_idx); },
-			0x49 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000, &mut payment_id, &mut payment_idx); },
-			0x4a => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000, &mut payment_id, &mut payment_idx); },
-			0x4b => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000, &mut payment_id, &mut payment_idx); },
-			0x4c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut payment_id, &mut payment_idx); },
-			0x4d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut payment_id, &mut payment_idx); },
-
-			0x50 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000, &mut payment_id, &mut payment_idx); },
-			0x51 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000, &mut payment_id, &mut payment_idx); },
-			0x52 => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000, &mut payment_id, &mut payment_idx); },
-			0x53 => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000, &mut payment_id, &mut payment_idx); },
-			0x54 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut payment_id, &mut payment_idx); },
-			0x55 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut payment_id, &mut payment_idx); },
-
-			0x58 => { send_payment(&nodes[0], &nodes[1], chan_a, 100, &mut payment_id, &mut payment_idx); },
-			0x59 => { send_payment(&nodes[1], &nodes[0], chan_a, 100, &mut payment_id, &mut payment_idx); },
-			0x5a => { send_payment(&nodes[1], &nodes[2], chan_b, 100, &mut payment_id, &mut payment_idx); },
-			0x5b => { send_payment(&nodes[2], &nodes[1], chan_b, 100, &mut payment_id, &mut payment_idx); },
-			0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut payment_id, &mut payment_idx); },
-			0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut payment_id, &mut payment_idx); },
-
-			0x60 => { send_payment(&nodes[0], &nodes[1], chan_a, 10, &mut payment_id, &mut payment_idx); },
-			0x61 => { send_payment(&nodes[1], &nodes[0], chan_a, 10, &mut payment_id, &mut payment_idx); },
-			0x62 => { send_payment(&nodes[1], &nodes[2], chan_b, 10, &mut payment_id, &mut payment_idx); },
-			0x63 => { send_payment(&nodes[2], &nodes[1], chan_b, 10, &mut payment_id, &mut payment_idx); },
-			0x64 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut payment_id, &mut payment_idx); },
-			0x65 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut payment_id, &mut payment_idx); },
-
-			0x68 => { send_payment(&nodes[0], &nodes[1], chan_a, 1, &mut payment_id, &mut payment_idx); },
-			0x69 => { send_payment(&nodes[1], &nodes[0], chan_a, 1, &mut payment_id, &mut payment_idx); },
-			0x6a => { send_payment(&nodes[1], &nodes[2], chan_b, 1, &mut payment_id, &mut payment_idx); },
-			0x6b => { send_payment(&nodes[2], &nodes[1], chan_b, 1, &mut payment_id, &mut payment_idx); },
-			0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id, &mut payment_idx); },
-			0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id, &mut payment_idx); },
+			0x30 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx),
+			0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx),
+			0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx),
+			0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx),
+			0x34 => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx,
+			),
+			0x35 => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx,
+			),
+
+			0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_id, &mut p_idx),
+			0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx),
+			0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx),
+			0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_id, &mut p_idx),
+			0x3c => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx,
+			),
+			0x3d => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx,
+			),
+
+			0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_id, &mut p_idx),
+			0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx),
+			0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx),
+			0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_id, &mut p_idx),
+			0x44 => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx,
+			),
+			0x45 => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx,
+			),
+
+			0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000, &mut p_id, &mut p_idx),
+			0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx),
+			0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx),
+			0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_id, &mut p_idx),
+			0x4c => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx,
+			),
+			0x4d => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx,
+			),
+
+			0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_id, &mut p_idx),
+			0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx),
+			0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx),
+			0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_id, &mut p_idx),
+			0x54 => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx,
+			),
+			0x55 => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx,
+			),
+
+			0x58 => send_noret(&nodes[0], &nodes[1], chan_a, 100, &mut p_id, &mut p_idx),
+			0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_id, &mut p_idx),
+			0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_id, &mut p_idx),
+			0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_id, &mut p_idx),
+			0x5c => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_id, &mut p_idx,
+			),
+			0x5d => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_id, &mut p_idx,
+			),
+
+			0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_id, &mut p_idx),
+			0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_id, &mut p_idx),
+			0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, &mut p_id, &mut p_idx),
+			0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_id, &mut p_idx),
+			0x64 => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_id, &mut p_idx,
+			),
+			0x65 => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_id, &mut p_idx,
+			),
+
+			0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_id, &mut p_idx),
+			0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_id, &mut p_idx),
+			0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_id, &mut p_idx),
+			0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_id, &mut p_idx),
+			0x6c => send_hop_noret(
+				&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_id, &mut p_idx,
+			),
+			0x6d => send_hop_noret(
+				&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_id, &mut p_idx,
+			),
 
 			0x80 => {
 				let mut max_feerate = last_htlc_clear_fee_a;
@@ -1232,7 +1680,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 				}
 				nodes[0].maybe_update_chan_fees();
 			},
-			0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },
+			0x81 => {
+				fee_est_a.ret_val.store(253, atomic::Ordering::Release);
+				nodes[0].maybe_update_chan_fees();
+			},
 
 			0x84 => {
 				let mut max_feerate = last_htlc_clear_fee_b;
@@ -1244,7 +1695,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 				}
 				nodes[1].maybe_update_chan_fees();
 			},
-			0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },
+			0x85 => {
+				fee_est_b.ret_val.store(253, atomic::Ordering::Release);
+				nodes[1].maybe_update_chan_fees();
+			},
 
 			0x88 => {
 				let mut max_feerate = last_htlc_clear_fee_c;
@@ -1256,75 +1710,117 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 				}
 				nodes[2].maybe_update_chan_fees();
 			},
-			0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
+			0x89 => {
+				fee_est_c.ret_val.store(253, atomic::Ordering::Release);
+				nodes[2].maybe_update_chan_fees();
+			},
+
+			0xf0 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_first),
+			0xf1 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_second),
+			0xf2 => complete_monitor_update(&monitor_a, &chan_1_funding, &Vec::pop),
+
+			0xf4 => complete_monitor_update(&monitor_b, &chan_1_funding, &complete_first),
+			0xf5 => complete_monitor_update(&monitor_b, &chan_1_funding, &complete_second),
+			0xf6 => complete_monitor_update(&monitor_b, &chan_1_funding, &Vec::pop),
+
+			0xf8 => complete_monitor_update(&monitor_b, &chan_2_funding, &complete_first),
+			0xf9 => complete_monitor_update(&monitor_b, &chan_2_funding, &complete_second),
+			0xfa => complete_monitor_update(&monitor_b, &chan_2_funding, &Vec::pop),
+
+			0xfc => complete_monitor_update(&monitor_c, &chan_2_funding, &complete_first),
+			0xfd => complete_monitor_update(&monitor_c, &chan_2_funding, &complete_second),
+			0xfe => complete_monitor_update(&monitor_c, &chan_2_funding, &Vec::pop),
 
 			0xff => {
 				// Test that no channel is in a stuck state where neither party can send funds even
 				// after we resolve all pending events.
-				// First make sure there are no pending monitor updates, resetting the error state
-				// and calling force_channel_monitor_updated for each monitor.
-				*monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
-				*monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
-				*monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
-
-				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
-					nodes[0].process_monitor_events();
-				}
-				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
-					nodes[1].process_monitor_events();
-				}
-				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
-					nodes[1].process_monitor_events();
-				}
-				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
-					nodes[2].process_monitor_events();
-				}
+				// First make sure there are no pending monitor updates and further update
+				// operations complete.
+				*monitor_a.persister.update_ret.lock().unwrap() =
+					ChannelMonitorUpdateStatus::Completed;
+				*monitor_b.persister.update_ret.lock().unwrap() =
+					ChannelMonitorUpdateStatus::Completed;
+				*monitor_c.persister.update_ret.lock().unwrap() =
+					ChannelMonitorUpdateStatus::Completed;
+
+				complete_all_monitor_updates(&monitor_a, &chan_1_funding);
+				complete_all_monitor_updates(&monitor_b, &chan_1_funding);
+				complete_all_monitor_updates(&monitor_b, &chan_2_funding);
+				complete_all_monitor_updates(&monitor_c, &chan_2_funding);
 
 				// Next, make sure peers are all connected to each other
 				if chan_a_disconnected {
-					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init {
-						features: nodes[1].init_features(), networks: None, remote_network_address: None
-					}, true).unwrap();
-					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init {
-						features: nodes[0].init_features(), networks: None, remote_network_address: None
-					}, false).unwrap();
+					let init_1 = Init {
+						features: nodes[1].init_features(),
+						networks: None,
+						remote_network_address: None,
+					};
+					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap();
+					let init_0 = Init {
+						features: nodes[0].init_features(),
+						networks: None,
+						remote_network_address: None,
+					};
+					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap();
 					chan_a_disconnected = false;
 				}
 				if chan_b_disconnected {
-					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init {
-						features: nodes[2].init_features(), networks: None, remote_network_address: None
-					}, true).unwrap();
-					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init {
-						features: nodes[1].init_features(), networks: None, remote_network_address: None
-					}, false).unwrap();
+					let init_2 = Init {
+						features: nodes[2].init_features(),
+						networks: None,
+						remote_network_address: None,
+					};
+					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
+					let init_1 = Init {
+						features: nodes[1].init_features(),
+						networks: None,
+						remote_network_address: None,
+					};
+					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
 					chan_b_disconnected = false;
 				}
 
 				for i in 0..std::usize::MAX {
-					if i == 100 { panic!("It may take may iterations to settle the state, but it should not take forever"); }
+					if i == 100 {
+						panic!("It may take may iterations to settle the state, but it should not take forever");
+					}
 					// Then, make sure any current forwards make their way to their destination
-					if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; }
-					if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; }
-					if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; }
+					if process_msg_events!(0, false, ProcessMessages::AllMessages) {
+						continue;
+					}
+					if process_msg_events!(1, false, ProcessMessages::AllMessages) {
+						continue;
+					}
+					if process_msg_events!(2, false, ProcessMessages::AllMessages) {
+						continue;
+					}
 					// ...making sure any pending PendingHTLCsForwardable events are handled and
 					// payments claimed.
-					if process_events!(0, false) { continue; }
-					if process_events!(1, false) { continue; }
-					if process_events!(2, false) { continue; }
+					if process_events!(0, false) {
+						continue;
+					}
+					if process_events!(1, false) {
+						continue;
+					}
+					if process_events!(2, false) {
+						continue;
+					}
 					break;
 				}
 
 				// Finally, make sure that at least one end of each channel can make a substantial payment
 				assert!(
-					send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id, &mut payment_idx) ||
-					send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx));
+					send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx)
+						|| send_payment(
+							&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx
+						)
+				);
 				assert!(
-					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx) ||
-					send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id, &mut payment_idx));
+					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx)
+						|| send_payment(
+							&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx
+						)
+				);
 
 				last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
 				last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
@@ -1377,6 +1873,6 @@ pub fn chanmon_consistency_test(data: &[u8], out: Out) {
 
 #[no_mangle]
 pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
-	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{}, false);
-	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{}, true);
+	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, false);
+	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, true);
 }