From f8caa325e51d8afb0cb65effd9cdb351ffda3fc7 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 30 Jun 2021 03:09:04 +0000
Subject: [PATCH] Add fuzz coverage of (potential) fee update messages

---
 fuzz/src/chanmon_consistency.rs    | 89 +++++++++++++++++++++++-------
 lightning/src/ln/channel.rs        | 18 +++++-
 lightning/src/ln/channelmanager.rs | 31 +++++++++++
 lightning/src/ln/mod.rs            |  4 ++
 4 files changed, 122 insertions(+), 20 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 4ce0df092..21a8b37f1 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -37,6 +37,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget,
 use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
+use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::ln::script::ShutdownScript;
@@ -58,16 +59,27 @@ use bitcoin::secp256k1::recovery::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
 use std::mem;
-use std::cmp::Ordering;
+use std::cmp::{self, Ordering};
 use std::collections::{HashSet, hash_map, HashMap};
 use std::sync::{Arc,Mutex};
 use std::sync::atomic;
 use std::io::Cursor;
 
-struct FuzzEstimator {}
+const MAX_FEE: u32 = 10_000;
+struct FuzzEstimator {
+	ret_val: atomic::AtomicU32,
+}
 impl FeeEstimator for FuzzEstimator {
-	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
-		253
+	fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
+		// We force-close channels if our counterparty sends us a feerate which is a small multiple
+		// of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
+		// always return a HighPriority feerate here which is >= the maximum Normal feerate and a
+		// Background feerate which is <= the minimum Normal feerate.
+		match conf_target {
+			ConfirmationTarget::HighPriority => MAX_FEE,
+			ConfirmationTarget::Background => 253,
+			ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
+		}
 	}
 }
 
@@ -132,7 +144,7 @@ impl chain::Watch for TestChainMonitor {
 		};
 		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
			read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
-		deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
+		deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
@@ -334,14 +346,13 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
 
 #[inline]
 pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
-	let fee_est = Arc::new(FuzzEstimator{});
 	let broadcast = Arc::new(TestBroadcaster{});
 
 	macro_rules! make_node {
make_node { - ($node_id: expr) => { { + ($node_id: expr, $fee_estimator: expr) => { { let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), revoked_commitments: Mutex::new(HashMap::new()) }); - let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager))); + let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager))); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = 0; @@ -351,16 +362,16 @@ pub fn do_test(data: &[u8], out: Out) { network, best_block: BestBlock::from_genesis(network), }; - (ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params), + (ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params), monitor, keys_manager) } } } macro_rules! reload_node { - ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { { + ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { { let keys_manager = Arc::clone(& $keys_manager); let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager))); + let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager))); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = 0; @@ -379,7 +390,7 @@ pub fn do_test(data: &[u8], out: Out) { let read_args = ChannelManagerReadArgs { keys_manager, - fee_estimator: fee_est.clone(), + fee_estimator: $fee_estimator.clone(), chain_monitor: chain_monitor.clone(), tx_broadcaster: broadcast.clone(), logger, @@ -497,11 +508,18 @@ pub fn do_test(data: &[u8], out: Out) { } } } + let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let mut last_htlc_clear_fee_a = 253; + let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let mut last_htlc_clear_fee_b = 253; + let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let mut last_htlc_clear_fee_c = 253; + // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest // forwarding. 
-	let (node_a, mut monitor_a, keys_manager_a) = make_node!(0);
-	let (node_b, mut monitor_b, keys_manager_b) = make_node!(1);
-	let (node_c, mut monitor_c, keys_manager_c) = make_node!(2);
+	let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a);
+	let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b);
+	let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c);
 
 	let mut nodes = [node_a, node_b, node_c];
 
@@ -639,7 +657,6 @@
 					events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
 						for dest in nodes.iter() {
 							if dest.get_our_node_id() == node_id {
-								assert!(update_fee.is_none());
 								for update_add in update_add_htlcs.iter() {
 									if !$corrupt_forward {
 										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
@@ -663,6 +680,9 @@
 								for update_fail_malformed in update_fail_malformed_htlcs.iter() {
 									dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
 								}
+								if let Some(msg) = update_fee {
+									dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
+								}
 								let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
 									!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
 								if $limit_events != ProcessMessages::AllMessages && processed_change {
@@ -928,7 +948,7 @@
 						node_a_ser.0.clear();
 						nodes[0].write(&mut node_a_ser).unwrap();
 					}
-					let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a);
+					let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
 					nodes[0] = new_node_a;
 					monitor_a = new_monitor_a;
 				},
@@ -947,7 +967,7 @@
 						bc_events.clear();
 						cb_events.clear();
 					}
-					let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b);
+					let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
 					nodes[1] = new_node_b;
 					monitor_b = new_monitor_b;
 				},
@@ -961,7 +981,7 @@
 						node_c_ser.0.clear();
 						nodes[2].write(&mut node_c_ser).unwrap();
 					}
-					let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c);
+					let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
 					nodes[2] = new_node_c;
 					monitor_c = new_monitor_c;
 				},
@@ -1023,6 +1043,33 @@
 
 			0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
 			0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },
+
+			0x80 => {
+				let max_feerate = last_htlc_clear_fee_a * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+				if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+					fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release);
+				}
+				nodes[0].maybe_update_chan_fees();
+			},
+			0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },
+
+			0x84 => {
+				let max_feerate = last_htlc_clear_fee_b * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+				if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+					fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release);
+				}
+				nodes[1].maybe_update_chan_fees();
+			},
+			0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },
+
+			0x88 => {
+				let max_feerate = last_htlc_clear_fee_c * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+				if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+					fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release);
+				}
+				nodes[2].maybe_update_chan_fees();
+			},
+			0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
+
 			0xff => {
 				// Test that no channel is in a stuck state where neither party can send funds even
 				// after we resolve all pending events.
@@ -1078,6 +1125,10 @@
 				assert!(
 					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) ||
 					send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id));
+
+				last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
+				last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
+				last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire);
 			},
 			_ => test_return!(),
 		}
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index dc09dc824..2a3edd684 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -341,6 +341,22 @@ pub enum UpdateFulfillCommitFetch {
 	DuplicateClaim {},
 }
 
+/// If the majority of a channel's funds are on the fundee's side and the initiator holds only just
+/// enough funds to cover their reserve value, the channel is at risk of getting "stuck". Because
+/// the initiator controls the feerate, if they then increase the channel fee, they may have no
+/// balance left, while the fundee is unable to send a payment as the fee increase more than drains
+/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
+/// To avoid this, before sending an HTLC when we are the initiator, we check that the feerate can
+/// increase by this multiple without hitting that case.
+/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
+/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
+/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
+/// leave the channel less usable as we hold a bigger reserve.
+#[cfg(fuzzing)]
+pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
+#[cfg(not(fuzzing))]
+const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
+
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@ -4326,7 +4342,7 @@ impl Channel {
 		// `2 *` and extra HTLC are for the fee spike buffer.
 		let commit_tx_fee_msat = if self.is_outbound() {
 			let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
-			2 * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
+			FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
 		} else { 0 };
 		if pending_value_to_self_msat - amount_msat < commit_tx_fee_msat {
 			return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", pending_value_to_self_msat, commit_tx_fee_msat)));
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 389e67ddb..7304c698a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -2612,6 +2612,37 @@ impl ChannelMana
 		(retain_channel, NotifyOption::DoPersist, ret_err)
 	}
 
+	#[cfg(fuzzing)]
+	/// In chanmon_consistency we want to sometimes do the channel fee updates done in
+	/// timer_tick_occurred, but we can't generate the disabled-channel updates it also generates,
+	/// as the fuzzer considers those a failure (they usually indicate a channel force-close,
+	/// which is exactly what it wants to detect). Thus, we expose this fee-update-only variant
+	/// for the fuzzer's benefit.
+	pub fn maybe_update_chan_fees(&self) {
+		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+			let mut should_persist = NotifyOption::SkipPersist;
+
+			let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+			let mut handle_errors = Vec::new();
+			{
+				let mut channel_state_lock = self.channel_state.lock().unwrap();
+				let channel_state = &mut *channel_state_lock;
+				let pending_msg_events = &mut channel_state.pending_msg_events;
+				let short_to_id = &mut channel_state.short_to_id;
+				channel_state.by_id.retain(|chan_id, chan| {
+					let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+					if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+					if err.is_err() {
+						handle_errors.push(err);
+					}
+					retain_channel
+				});
+			}
+
+			should_persist
+		});
+	}
+
 	/// Performs actions which should happen on startup and roughly once per minute thereafter.
 	///
 	/// This currently includes:
diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs
index bebd763f6..5576d8bd0 100644
--- a/lightning/src/ln/mod.rs
+++ b/lightning/src/ln/mod.rs
@@ -34,7 +34,11 @@ pub mod peer_channel_encryptor;
 #[cfg(not(feature = "fuzztarget"))]
 pub(crate) mod peer_channel_encryptor;
 
+#[cfg(feature = "fuzztarget")]
+pub mod channel;
+#[cfg(not(feature = "fuzztarget"))]
 mod channel;
+
 mod onion_utils;
 mod wire;
 
-- 
2.39.5
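
For readers unfamiliar with the "fee spike buffer" this patch parameterizes, the check in channel.rs can be pictured as a small standalone sketch. The helper below is hypothetical (it is not LDK's API); the names mirror the fields used in the channel.rs hunk above, and it simplifies away the extra-HTLC allowance that the real next_local_commit_tx_fee_msat call includes:

const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

/// Hypothetical helper: can the channel initiator afford to offer `amount_msat` while still
/// keeping enough balance to pay the commitment-tx fee if the feerate later rises by the multiple?
fn initiator_can_afford_htlc(
	pending_value_to_self_msat: u64, // our balance on the commitment transaction
	amount_msat: u64,                // value of the HTLC we want to offer
	next_commit_tx_fee_msat: u64,    // commitment-tx fee once the new HTLC is included
) -> bool {
	// Only the initiator pays the commitment-tx fee, so it must keep a buffer of
	// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE times that fee on top of the HTLC it sends.
	let required_buffer_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * next_commit_tx_fee_msat;
	match pending_value_to_self_msat.checked_sub(amount_msat) {
		Some(remaining_msat) => remaining_msat >= required_buffer_msat,
		None => false,
	}
}

fn main() {
	// With a 5_000 msat commitment fee and the 2x multiple, sending 90_000 msat out of a
	// 100_000 msat balance leaves exactly the required 10_000 msat buffer...
	assert!(initiator_can_afford_htlc(100_000, 90_000, 5_000));
	// ...while sending 91_000 msat would leave only 9_000 msat and is rejected.
	assert!(!initiator_can_afford_htlc(100_000, 91_000, 5_000));
}

This mirrors the intent of the "pending_value_to_self_msat - amount_msat < commit_tx_fee_msat" check in the patch, and the same multiple is what the fuzz harness uses to cap its Normal feerate (last_htlc_clear_fee * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE) so feerate updates never push a channel into the stuck state described above.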