X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=fuzz%2Fsrc%2Fchanmon_consistency.rs;h=e923ef882f26ce643aa7c9478f15ecc648c931d8;hb=c383f06538ac664fe3312daf765595ba106d5b98;hp=c10b5c0e364ae770467dd03a74e8d1d1fed01c15;hpb=dddb2e28c1e2586346d94775d5bb63414c6a73bb;p=rust-lightning

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index c10b5c0e..e923ef88 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -18,8 +18,6 @@
 //! send-side handling is correct, other peers. We consider it a failure if any action results in a
 //! channel being force-closed.
 
-use bitcoin::TxMerkleNode;
-use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
@@ -37,7 +35,7 @@ use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, chan
 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use lightning::chain::keysinterface::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
+use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
 use lightning::events;
 use lightning::events::MessageSendEventsProvider;
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
@@ -45,12 +43,13 @@ use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelMana
 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::ln::script::ShutdownScript;
+use lightning::ln::functional_test_utils::*;
 use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::logger::Logger;
 use lightning::util::config::UserConfig;
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use lightning::routing::router::{InFlightHtlcs, Route, RouteHop, RouteParameters, Router};
+use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RouteParameters, Router};
 
 use crate::utils::test_logger::{self, Output};
 use crate::utils::test_persister::TestPersister;
@@ -79,7 +78,7 @@ impl FeeEstimator for FuzzEstimator {
 		// Background feerate which is <= the minimum Normal feerate.
 		match conf_target {
 			ConfirmationTarget::HighPriority => MAX_FEE,
-			ConfirmationTarget::Background => 253,
+			ConfirmationTarget::Background|ConfirmationTarget::MempoolMinimum => 253,
 			ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
 		}
 	}
@@ -90,7 +89,7 @@ struct FuzzRouter {}
 impl Router for FuzzRouter {
 	fn find_route(
 		&self, _payer: &PublicKey, _params: &RouteParameters, _first_hops: Option<&[&ChannelDetails]>,
-		_inflight_htlcs: &InFlightHtlcs
+		_inflight_htlcs: InFlightHtlcs
 	) -> Result<Route, msgs::LightningError> {
 		Err(msgs::LightningError {
 			err: String::from("Not implemented"),
@@ -101,7 +100,7 @@ impl Router for FuzzRouter {
 
 pub struct TestBroadcaster {}
 impl BroadcasterInterface for TestBroadcaster {
-	fn broadcast_transaction(&self, _tx: &Transaction) { }
+	fn broadcast_transactions(&self, _txs: &[&Transaction]) { }
 }
 
 pub struct VecWriter(pub Vec<u8>);
@@ -240,6 +239,7 @@ impl SignerProvider for KeyProvider {
 			[id, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_secret[31]],
 			channel_value_satoshis,
 			channel_keys_id,
+			channel_keys_id,
 		);
 		let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
 		EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
@@ -248,7 +248,7 @@ impl SignerProvider for KeyProvider {
 	fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
 		let mut reader = std::io::Cursor::new(buffer);
 
-		let inner: InMemorySigner = Readable::read(&mut reader)?;
+		let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
 		let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
 		Ok(EnforcingSigner {
@@ -258,18 +258,18 @@ impl SignerProvider for KeyProvider {
 		})
 	}
 
-	fn get_destination_script(&self) -> Script {
+	fn get_destination_script(&self) -> Result<Script, ()> {
 		let secp_ctx = Secp256k1::signing_only();
 		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_secret[31]]).unwrap();
 		let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
-		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
+		Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script())
 	}
 
-	fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+	fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
 		let secp_ctx = Secp256k1::signing_only();
 		let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_secret[31]]).unwrap();
 		let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
-		ShutdownScript::new_p2wpkh(&pubkey_hash)
+		Ok(ShutdownScript::new_p2wpkh(&pubkey_hash))
 	}
 }
 
@@ -285,7 +285,7 @@ impl KeyProvider {
 }
 
 #[inline]
-fn check_api_err(api_err: APIError) {
+fn check_api_err(api_err: APIError, sendable_bounds_violated: bool) {
 	match api_err {
 		APIError::APIMisuseError { .. } => panic!("We can't misuse the API"),
 		APIError::FeeRateTooHigh { .. } => panic!("We can't send too much fee?"),
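Note: `check_api_err` (and `check_payment_err` below) now take a `sendable_bounds_violated` flag, and the accepted `ChannelUnavailable` messages shrink to the two next-HTLC bound errors. The flag is derived by the payment helpers later in this diff from the channel's advertised bounds; a minimal sketch of that derivation, using only the `ChannelDetails` fields the diff itself relies on (`source`, `chan_id` and `amt` are illustrative names, not part of the patch):

	// Read the advertised next-HTLC bounds for the channel we plan to use;
	// if the channel is not currently usable, fall back to (0, 0).
	let (min_msat, max_msat) = source.list_usable_channels().iter()
		.find(|chan| chan.short_channel_id == Some(chan_id))
		.map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
		.unwrap_or((0, 0));
	// Passed into check_payment_err()/check_api_err(): a send failure is only
	// tolerated when the attempted amount was actually outside these bounds.
	let sendable_bounds_violated = amt < min_msat || amt > max_msat;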
@@ -296,15 +296,11 @@ fn check_api_err(api_err: APIError) {
 			// is probably just stale and you should add new messages here.
 			match err.as_str() {
 				"Peer for first hop currently disconnected" => {},
-				_ if err.starts_with("Cannot push more than their max accepted HTLCs ") => {},
-				_ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept ") => {},
-				_ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
-				_ if err.starts_with("Cannot send value that would put counterparty balance under holder-announced channel reserve value") => {},
-				_ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
-				_ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
-				_ if err.starts_with("Cannot send value that would put our exposure to dust HTLCs at") => {},
+				_ if err.starts_with("Cannot send less than our next-HTLC minimum - ") => {},
+				_ if err.starts_with("Cannot send more than our next-HTLC maximum - ") => {},
 				_ => panic!("{}", err),
 			}
+			assert!(sendable_bounds_violated);
 		},
 		APIError::MonitorUpdateInProgress => {
 			// We can (obviously) temp-fail a monitor update
@@ -313,17 +309,17 @@ fn check_api_err(api_err: APIError) {
 	}
 }
 #[inline]
-fn check_payment_err(send_err: PaymentSendFailure) {
+fn check_payment_err(send_err: PaymentSendFailure, sendable_bounds_violated: bool) {
 	match send_err {
-		PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err),
+		PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err, sendable_bounds_violated),
 		PaymentSendFailure::PathParameterError(per_path_results) => {
-			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
+			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
 		},
 		PaymentSendFailure::AllFailedResendSafe(per_path_results) => {
-			for api_err in per_path_results { check_api_err(api_err); }
+			for api_err in per_path_results { check_api_err(api_err, sendable_bounds_violated); }
 		},
 		PaymentSendFailure::PartialFailure { results, .. } => {
-			for res in results { if let Err(api_err) = res { check_api_err(api_err); } }
+			for res in results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
 		},
 		PaymentSendFailure::DuplicatePayment => panic!(),
 	}
@@ -351,20 +347,31 @@ fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, p
 	let mut payment_id = [0; 32];
 	payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
 	*payment_idx += 1;
+	let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
+		.iter().find(|chan| chan.short_channel_id == Some(dest_chan_id))
+		.map(|chan|
+			(chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
+		.unwrap_or((0, 0));
 	if let Err(err) = source.send_payment_with_route(&Route {
-		paths: vec![vec![RouteHop {
+		paths: vec![Path { hops: vec![RouteHop {
 			pubkey: dest.get_our_node_id(),
 			node_features: dest.node_features(),
 			short_channel_id: dest_chan_id,
 			channel_features: dest.channel_features(),
 			fee_msat: amt,
 			cltv_expiry_delta: 200,
-		}]],
+		}], blinded_tail: None }],
 		payment_params: None,
 	}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
-		check_payment_err(err);
+		check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
 		false
-	} else { true }
+	} else {
+		// Note that while the max is a strict upper-bound, we can occasionally send substantially
+		// below the minimum, with some gap which is unusable immediately below the minimum. Thus,
+		// we don't check against min_value_sendable here.
+		assert!(amt <= max_value_sendable);
+		true
+	}
 }
 #[inline]
 fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool {
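Note: with the newer `lightning::routing::router` types used above, each element of `Route::paths` is a `Path` that wraps the hop list together with an optional blinded tail, rather than a bare `Vec<RouteHop>`. A minimal single-hop construction in that shape, mirroring the fuzzer's usage (`dest`, `dest_chan_id` and `amt` as in `send_payment` above; this is a sketch, not part of the patch):

	let route = Route {
		paths: vec![Path {
			hops: vec![RouteHop {
				pubkey: dest.get_our_node_id(),
				node_features: dest.node_features(),
				short_channel_id: dest_chan_id,
				channel_features: dest.channel_features(),
				fee_msat: amt,
				cltv_expiry_delta: 200,
			}],
			// The fuzzer's hand-built paths never use route blinding.
			blinded_tail: None,
		}],
		payment_params: None,
	};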
@@ -373,13 +380,19 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
 	let mut payment_id = [0; 32];
 	payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
 	*payment_idx += 1;
+	let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
+		.iter().find(|chan| chan.short_channel_id == Some(middle_chan_id))
+		.map(|chan|
+			(chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
+		.unwrap_or((0, 0));
+	let first_hop_fee = 50_000;
 	if let Err(err) = source.send_payment_with_route(&Route {
-		paths: vec![vec![RouteHop {
+		paths: vec![Path { hops: vec![RouteHop {
 			pubkey: middle.get_our_node_id(),
 			node_features: middle.node_features(),
 			short_channel_id: middle_chan_id,
 			channel_features: middle.channel_features(),
-			fee_msat: 50000,
+			fee_msat: first_hop_fee,
 			cltv_expiry_delta: 100,
 		},RouteHop {
 			pubkey: dest.get_our_node_id(),
@@ -388,12 +401,19 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
 			channel_features: dest.channel_features(),
 			fee_msat: amt,
 			cltv_expiry_delta: 200,
-		}]],
+		}], blinded_tail: None }],
 		payment_params: None,
 	}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
-		check_payment_err(err);
+		let sent_amt = amt + first_hop_fee;
+		check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
 		false
-	} else { true }
+	} else {
+		// Note that while the max is a strict upper-bound, we can occasionally send substantially
+		// below the minimum, with some gap which is unusable immediately below the minimum. Thus,
+		// we don't check against min_value_sendable here.
+		assert!(amt + first_hop_fee <= max_value_sendable);
+		true
+	}
 }
 
 #[inline]
@@ -416,11 +436,12 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 			config.channel_config.forwarding_fee_proportional_millionths = 0;
 			config.channel_handshake_config.announced_channel = true;
 			let network = Network::Bitcoin;
+			let best_block_timestamp = genesis_block(network).header.time;
 			let params = ChainParameters {
 				network,
 				best_block: BestBlock::from_network(network),
 			};
-			(ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), &router, Arc::clone(&logger), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), config, params),
+			(ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), &router, Arc::clone(&logger), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), config, params, best_block_timestamp),
 			monitor, keys_manager)
 		} }
 	}
@@ -474,8 +495,12 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 	let mut channel_txn = Vec::new();
 	macro_rules! make_channel {
 		($source: expr, $dest: expr, $chan_id: expr) => { {
-			$source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }, true).unwrap();
-			$dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }, false).unwrap();
+			$source.peer_connected(&$dest.get_our_node_id(), &Init {
+				features: $dest.init_features(), networks: None, remote_network_address: None
+			}, true).unwrap();
+			$dest.peer_connected(&$source.get_our_node_id(), &Init {
+				features: $source.init_features(), networks: None, remote_network_address: None
+			}, false).unwrap();
 			$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
 			let open_channel = {
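Note: two smaller signature changes show up in the macros above: `peer_connected` now takes an `Init` message carrying a `networks` field (left as `None` here), and `ChannelManager::new` takes an additional current-time argument. The fuzzer derives that timestamp from the genesis header so runs stay deterministic; a minimal restatement of that step (sketch only, names as in the diff):

	let network = Network::Bitcoin;
	// A fixed, reproducible timestamp taken from the chain's genesis header,
	// passed as the final argument to ChannelManager::new in make_node above.
	let best_block_timestamp = genesis_block(network).header.time;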
@@ -546,11 +571,11 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 	macro_rules! confirm_txn {
 		($node: expr) => { {
 			let chain_hash = genesis_block(Network::Bitcoin).block_hash();
-			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+			let mut header = create_dummy_header(chain_hash, 42);
 			let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
 			$node.transactions_confirmed(&header, &txdata, 1);
 			for _ in 2..100 {
-				header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+				header = create_dummy_header(header.block_hash(), 42);
 			}
 			$node.best_block_updated(&header, 99);
 		} }
@@ -1006,15 +1031,23 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 			},
 			0x0e => {
 				if chan_a_disconnected {
-					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
-					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
+					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init {
+						features: nodes[1].init_features(), networks: None, remote_network_address: None
+					}, true).unwrap();
+					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init {
+						features: nodes[0].init_features(), networks: None, remote_network_address: None
+					}, false).unwrap();
 					chan_a_disconnected = false;
 				}
 			},
			0x0f => {
 				if chan_b_disconnected {
-					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
-					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
+					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init {
+						features: nodes[2].init_features(), networks: None, remote_network_address: None
+					}, true).unwrap();
+					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init {
+						features: nodes[1].init_features(), networks: None, remote_network_address: None
+					}, false).unwrap();
 					chan_b_disconnected = false;
 				}
 			},
@@ -1209,13 +1242,21 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 
 			// Next, make sure peers are all connected to each other
 			if chan_a_disconnected {
-				nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
-				nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
+				nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init {
+					features: nodes[1].init_features(), networks: None, remote_network_address: None
+				}, true).unwrap();
+				nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init {
+					features: nodes[0].init_features(), networks: None, remote_network_address: None
+				}, false).unwrap();
 				chan_a_disconnected = false;
 			}
 			if chan_b_disconnected {
-				nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
-				nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
+				nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init {
+					features: nodes[2].init_features(), networks: None, remote_network_address: None
+				}, true).unwrap();
+				nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init {
+					features: nodes[1].init_features(), networks: None, remote_network_address: None
+				}, false).unwrap();
 				chan_b_disconnected = false;
 			}