diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 4ce0df09..28bab5a2 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -30,17 +30,18 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::{BestBlock, chainmonitor, channelmonitor, Confirm, Watch};
-use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
+use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::ln::script::ShutdownScript;
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER};
+use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::events;
 use lightning::util::logger::Logger;
@@ -58,16 +59,27 @@ use bitcoin::secp256k1::recovery::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
 use std::mem;
-use std::cmp::Ordering;
+use std::cmp::{self, Ordering};
 use std::collections::{HashSet, hash_map, HashMap};
 use std::sync::{Arc,Mutex};
 use std::sync::atomic;
 use std::io::Cursor;
 
-struct FuzzEstimator {}
+const MAX_FEE: u32 = 10_000;
+struct FuzzEstimator {
+    ret_val: atomic::AtomicU32,
+}
 impl FeeEstimator for FuzzEstimator {
-    fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
-        253
+    fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
+        // We force-close channels if our counterparty sends us a feerate which is a small multiple
+        // of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
+        // always return a HighPriority feerate here which is >= the maximum Normal feerate and a
+        // Background feerate which is <= the minimum Normal feerate.
+        match conf_target {
+            ConfirmationTarget::HighPriority => MAX_FEE,
+            ConfirmationTarget::Background => 253,
+            ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
+        }
     }
 }
 
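The new estimator only lets the fuzzer vary the Normal target, pinning Background at the 253 sat/kW floor and HighPriority at MAX_FEE so the fuzzed feerate can never land outside the band the force-close checks allow. A minimal standalone sketch of the same clamping invariant (the `ConfTarget` enum and `ClampedEstimator` here are illustrative stand-ins, not LDK API):

```rust
use std::cmp;
use std::sync::atomic::{AtomicU32, Ordering};

const MAX_FEE: u32 = 10_000; // upper clamp, as in the fuzzer
const FLOOR_FEE: u32 = 253;  // minimum relay-compatible sat/kW

enum ConfTarget { Background, Normal, HighPriority }

struct ClampedEstimator { ret_val: AtomicU32 }

impl ClampedEstimator {
    fn rate(&self, t: ConfTarget) -> u32 {
        match t {
            ConfTarget::HighPriority => MAX_FEE,
            ConfTarget::Background => FLOOR_FEE,
            // Only the Normal target is fuzz-controlled, and it is clamped to MAX_FEE.
            ConfTarget::Normal => cmp::min(self.ret_val.load(Ordering::Acquire), MAX_FEE),
        }
    }
}

fn main() {
    // Even a wild stored value is clamped, so Background <= Normal <= HighPriority
    // always holds and neither side ever sees a feerate it would force-close over.
    let est = ClampedEstimator { ret_val: AtomicU32::new(u32::MAX) };
    assert_eq!(est.rate(ConfTarget::Normal), MAX_FEE);
    assert!(est.rate(ConfTarget::Background) <= est.rate(ConfTarget::Normal));
    assert!(est.rate(ConfTarget::Normal) <= est.rate(ConfTarget::HighPriority));
}
```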
@@ -82,16 +94,13 @@ impl Writer for VecWriter {
         self.0.extend_from_slice(buf);
         Ok(())
     }
-    fn size_hint(&mut self, size: usize) {
-        self.0.reserve_exact(size);
-    }
 }
 
 struct TestChainMonitor {
     pub logger: Arc<dyn Logger>,
     pub keys: Arc<KeyProvider>,
+    pub persister: Arc<TestPersister>,
     pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
-    pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
     // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
     // logic will automatically force-close our channels for us (as we don't have an up-to-date
     // monitor implying we are not able to punish misbehaving counterparties). Because this test
@@ -103,28 +112,27 @@ struct TestChainMonitor {
 impl TestChainMonitor {
     pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
         Self {
-            chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)),
+            chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))),
             logger,
             keys,
-            update_ret: Mutex::new(Ok(())),
+            persister,
             latest_monitors: Mutex::new(HashMap::new()),
             should_update_manager: atomic::AtomicBool::new(false),
         }
     }
 }
 impl chain::Watch<EnforcingSigner> for TestChainMonitor {
-    fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+    fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
         let mut ser = VecWriter(Vec::new());
         monitor.write(&mut ser).unwrap();
         if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
             panic!("Already had monitor pre-watch_channel");
         }
         self.should_update_manager.store(true, atomic::Ordering::Relaxed);
-        assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
-        self.update_ret.lock().unwrap().clone()
+        self.chain_monitor.watch_channel(funding_txo, monitor)
     }
 
-    fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+    fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
         let mut map_lock = self.latest_monitors.lock().unwrap();
         let mut map_entry = match map_lock.entry(funding_txo) {
             hash_map::Entry::Occupied(entry) => entry,
@@ -132,13 +140,12 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
         };
         let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
             read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
-        deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
+        deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
         let mut ser = VecWriter(Vec::new());
         deserialized_monitor.write(&mut ser).unwrap();
         map_entry.insert((update.update_id, ser.0));
         self.should_update_manager.store(true, atomic::Ordering::Relaxed);
-        assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
-        self.update_ret.lock().unwrap().clone()
+        self.chain_monitor.update_channel(funding_txo, update)
     }
 
     fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
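Moving the injected error out of TestChainMonitor and into TestPersister means the TemporaryFailure now flows through the real ChainMonitor plumbing instead of being bolted on after the call succeeds. The test-double shape on its own, a minimal sketch with simplified types (the `UpdateErr` enum and `persist` method are illustrative):

```rust
use std::sync::Mutex;

#[derive(Clone, Debug, PartialEq)]
enum UpdateErr { TemporaryFailure }

// Test double: every persistence call returns whatever result the test last stored.
struct TestPersister {
    update_ret: Mutex<Result<(), UpdateErr>>,
}

impl TestPersister {
    fn persist(&self) -> Result<(), UpdateErr> {
        self.update_ret.lock().unwrap().clone()
    }
}

fn main() {
    let p = TestPersister { update_ret: Mutex::new(Ok(())) };
    assert_eq!(p.persist(), Ok(()));
    // The fuzzer's 0x00..0x06 input bytes flip this cell back and forth.
    *p.update_ret.lock().unwrap() = Err(UpdateErr::TemporaryFailure);
    assert_eq!(p.persist(), Err(UpdateErr::TemporaryFailure));
}
```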
@@ -149,7 +156,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
 struct KeyProvider {
     node_id: u8,
     rand_bytes_id: atomic::AtomicU32,
-    revoked_commitments: Mutex<HashMap<[u8; 32], Arc<Mutex<u64>>>>,
+    enforcement_states: Mutex<HashMap<[u8; 32], Arc<Mutex<EnforcementState>>>>,
 }
 impl KeysInterface for KeyProvider {
     type Signer = EnforcingSigner;
@@ -186,7 +193,7 @@ impl KeysInterface for KeyProvider {
             channel_value_satoshis,
             [0; 32],
         );
-        let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed);
+        let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
         EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
     }
 
@@ -201,14 +208,11 @@ impl KeysInterface for KeyProvider {
         let mut reader = std::io::Cursor::new(buffer);
 
         let inner: InMemorySigner = Readable::read(&mut reader)?;
-        let revoked_commitment = self.make_revoked_commitment_cell(inner.commitment_seed);
-
-        let last_commitment_number = Readable::read(&mut reader)?;
+        let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
         Ok(EnforcingSigner {
             inner,
-            last_commitment_number: Arc::new(Mutex::new(last_commitment_number)),
-            revoked_commitment,
+            state,
             disable_revocation_policy_check: false,
         })
     }
@@ -219,10 +223,10 @@ impl KeysInterface for KeyProvider {
 }
 
 impl KeyProvider {
-    fn make_revoked_commitment_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<u64>> {
-        let mut revoked_commitments = self.revoked_commitments.lock().unwrap();
+    fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
+        let mut revoked_commitments = self.enforcement_states.lock().unwrap();
         if !revoked_commitments.contains_key(&commitment_seed) {
-            revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(INITIAL_REVOKED_COMMITMENT_NUMBER)));
+            revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new())));
         }
         let cell = revoked_commitments.get(&commitment_seed).unwrap();
         Arc::clone(cell)
@@ -334,14 +338,14 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
 
 #[inline]
 pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
-    let fee_est = Arc::new(FuzzEstimator{});
     let broadcast = Arc::new(TestBroadcaster{});
 
     macro_rules! make_node {
-        ($node_id: expr) => { {
+        ($node_id: expr, $fee_estimator: expr) => { {
            let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-           let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), revoked_commitments: Mutex::new(HashMap::new()) });
-           let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager)));
+           let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
+           let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));
 
            let mut config = UserConfig::default();
            config.channel_options.forwarding_fee_proportional_millionths = 0;
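`make_enforcement_state_cell` memoizes one shared `EnforcementState` per commitment seed, so a signer recreated from a serialized node keeps enforcing against the same revocation watermark as the signer it replaces. The get-or-insert shared-cell pattern in isolation (the `State` and `Cells` names are illustrative):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

#[derive(Default)]
struct State { last_revoked: u64 }

struct Cells { map: Mutex<HashMap<[u8; 32], Arc<Mutex<State>>>> }

impl Cells {
    // Hand every caller with the same key a handle to the same shared cell.
    fn cell(&self, key: [u8; 32]) -> Arc<Mutex<State>> {
        let mut map = self.map.lock().unwrap();
        Arc::clone(map.entry(key).or_insert_with(Default::default))
    }
}

fn main() {
    let cells = Cells { map: Mutex::new(HashMap::new()) };
    let first = cells.cell([7; 32]);
    first.lock().unwrap().last_revoked = 42;
    // A "reloaded" signer observes the state written through the first handle.
    assert_eq!(cells.cell([7; 32]).lock().unwrap().last_revoked, 42);
}
```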
@@ -351,16 +355,17 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                network,
                best_block: BestBlock::from_genesis(network),
            };
-           (ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
+           (ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
            monitor, keys_manager)
        } }
    }
 
    macro_rules! reload_node {
-        ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { {
+        ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
            let keys_manager = Arc::clone(& $keys_manager);
            let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-           let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager)));
+           let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));
 
            let mut config = UserConfig::default();
            config.channel_options.forwarding_fee_proportional_millionths = 0;
@@ -379,7 +384,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 
            let read_args = ChannelManagerReadArgs {
                keys_manager,
-               fee_estimator: fee_est.clone(),
+               fee_estimator: $fee_estimator.clone(),
                chain_monitor: chain_monitor.clone(),
                tx_broadcaster: broadcast.clone(),
                logger,
@@ -497,11 +502,18 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
            }
        }
    }
 
+   let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+   let mut last_htlc_clear_fee_a = 253;
+   let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+   let mut last_htlc_clear_fee_b = 253;
+   let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+   let mut last_htlc_clear_fee_c = 253;
+
    // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
    // forwarding.
-   let (node_a, mut monitor_a, keys_manager_a) = make_node!(0);
-   let (node_b, mut monitor_b, keys_manager_b) = make_node!(1);
-   let (node_c, mut monitor_c, keys_manager_c) = make_node!(2);
+   let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a);
+   let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b);
+   let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c);
 
    let mut nodes = [node_a, node_b, node_c];
 
@@ -579,7 +591,6 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                        },
                        events::MessageSendEvent::SendFundingLocked { .. } => continue,
                        events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
-                       events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => continue,
                        events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
                            assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
                            if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
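Each node now owns a distinct `FuzzEstimator`, shared by `Arc` between its ChannelManager and its TestChainMonitor, so the fuzzer can move one node's feerate without touching its peers. The sharing shape in miniature (a sketch; `Estimator`, `Manager`, and `Monitor` are stand-ins for the wired-up LDK types):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

struct Estimator { ret_val: AtomicU32 }

struct Manager { fee_est: Arc<Estimator> }
struct Monitor { fee_est: Arc<Estimator> }

fn main() {
    let fee_est_a = Arc::new(Estimator { ret_val: AtomicU32::new(253) });
    // Both components hold the same cell, as make_node! wires them up.
    let mgr = Manager { fee_est: Arc::clone(&fee_est_a) };
    let mon = Monitor { fee_est: Arc::clone(&fee_est_a) };
    // One store by the fuzzer is visible to both halves of node A only.
    fee_est_a.ret_val.store(1_000, Ordering::Release);
    assert_eq!(mgr.fee_est.ret_val.load(Ordering::Acquire), 1_000);
    assert_eq!(mon.fee_est.ret_val.load(Ordering::Acquire), 1_000);
}
```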
@@ -637,10 +648,10 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                    had_events = true;
                    match event {
                        events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
-                           for dest in nodes.iter() {
+                           for (idx, dest) in nodes.iter().enumerate() {
                                if dest.get_our_node_id() == node_id {
-                                   assert!(update_fee.is_none());
                                    for update_add in update_add_htlcs.iter() {
+                                       out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
                                        if !$corrupt_forward {
                                            dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
                                        } else {
@@ -655,14 +666,21 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                        }
                                    }
                                    for update_fulfill in update_fulfill_htlcs.iter() {
+                                       out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
                                        dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
                                    }
                                    for update_fail in update_fail_htlcs.iter() {
+                                       out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
                                        dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
                                    }
                                    for update_fail_malformed in update_fail_malformed_htlcs.iter() {
+                                       out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
                                        dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
                                    }
+                                   if let Some(msg) = update_fee {
+                                       out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
+                                       dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
+                                   }
                                    let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
                                        !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
                                    if $limit_events != ProcessMessages::AllMessages && processed_change {
@@ -677,21 +695,24 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                        });
                                        break;
                                    }
+                                   out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
                                    dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
                                    break;
                                }
                            }
                        },
                        events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
-                           for dest in nodes.iter() {
+                           for (idx, dest) in nodes.iter().enumerate() {
                                if dest.get_our_node_id() == *node_id {
+                                   out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
                                    dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
                                }
                            }
                        },
                        events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
-                           for dest in nodes.iter() {
+                           for (idx, dest) in nodes.iter().enumerate() {
                                if dest.get_our_node_id() == *node_id {
+                                   out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
                                    dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
                                }
                            }
@@ -702,10 +723,6 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                        events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
                            // Can be generated as a reestablish response
                        },
-                       events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
-                           // Can be generated due to a payment forward being rejected due to a
-                           // channel having previously failed a monitor update
-                       },
                        events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                            // When we reconnect we will resend a channel_update to make sure our
                            // counterparty has the latest parameters for receiving payments
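Every message delivery now writes a breadcrumb to the fuzzer's output before the handler runs, which makes a crashing input self-describing when replayed. The same idea in miniature (the `Out` sink here is a stand-in for the harness's `test_logger::Output`):

```rust
use std::sync::{Arc, Mutex};

// Stand-in for the fuzz harness's shared output sink.
#[derive(Clone)]
struct Out(Arc<Mutex<Vec<u8>>>);

impl Out {
    fn locked_write(&self, bytes: &[u8]) {
        self.0.lock().unwrap().extend_from_slice(bytes);
    }
}

fn deliver<F: FnOnce()>(out: &Out, what: &str, idx: usize, handler: F) {
    // Log before handling: if the handler panics, the breadcrumb survives.
    out.locked_write(format!("Delivering {} to node {}.\n", what, idx).as_bytes());
    handler();
}

fn main() {
    let out = Out(Arc::new(Mutex::new(Vec::new())));
    deliver(&out, "update_add_htlc", 1, || { /* dest.handle_update_add_htlc(...) */ });
    assert_eq!(&out.0.lock().unwrap()[..], &b"Delivering update_add_htlc to node 1.\n"[..]);
}
```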
@@ -744,7 +761,6 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                    events::MessageSendEvent::SendChannelReestablish { .. } => {},
                    events::MessageSendEvent::SendFundingLocked { .. } => {},
                    events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
-                   events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
                    events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                        assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
                    },
@@ -762,7 +778,6 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                    events::MessageSendEvent::SendChannelReestablish { .. } => {},
                    events::MessageSendEvent::SendFundingLocked { .. } => {},
                    events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
-                   events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
                    events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                        assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
                    },
@@ -812,7 +827,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                        }
                    },
                    events::Event::PaymentSent { .. } => {},
-                   events::Event::PaymentFailed { .. } => {},
+                   events::Event::PaymentPathFailed { .. } => {},
                    events::Event::PaymentForwarded { .. } if $node == 1 => {},
                    events::Event::PendingHTLCsForwardable { .. } => {
                        nodes[$node].process_pending_htlc_forwards();
@@ -824,36 +839,42 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
            }
        }
    }
 
-       match get_slice!(1)[0] {
+       let v = get_slice!(1)[0];
+       out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes());
+       match v {
            // In general, we keep related message groups close together in binary form, allowing
            // bit-twiddling mutations to have similar effects. This is probably overkill, but no
            // harm in doing so.
-           0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-           0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-           0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-           0x04 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
-           0x05 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
-           0x06 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
+           0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+           0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+           0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+           0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
+           0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
+           0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),
            0x08 => {
                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-                   nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+                   monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+                   nodes[0].process_monitor_events();
                }
            },
            0x09 => {
                if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-                   nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+                   monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+                   nodes[1].process_monitor_events();
                }
            },
            0x0a => {
                if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-                   nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+                   monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+                   nodes[1].process_monitor_events();
                }
            },
            0x0b => {
                if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-                   nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+                   monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+                   nodes[2].process_monitor_events();
                }
            },
 
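The old single `channel_monitor_updated` call on the manager becomes a two-step dance: first tell the chain monitor the pending update completed, then let the manager drain the resulting MonitorEvents. A toy model of that completion-then-poll flow (all names here are illustrative, not the LDK API surface):

```rust
use std::sync::Mutex;

enum MonitorEvent { UpdateCompleted { update_id: u64 } }

#[derive(Default)]
struct ChainMon { pending: Mutex<Vec<MonitorEvent>> }

impl ChainMon {
    // Step 1: mark the async persistence as done, queueing a completion event.
    fn force_updated(&self, update_id: u64) {
        self.pending.lock().unwrap().push(MonitorEvent::UpdateCompleted { update_id });
    }
    fn release_pending(&self) -> Vec<MonitorEvent> {
        std::mem::take(&mut *self.pending.lock().unwrap())
    }
}

struct Manager;
impl Manager {
    // Step 2: the manager polls events and resumes one channel per completion.
    fn process_monitor_events(&self, mon: &ChainMon) -> usize {
        mon.release_pending().len()
    }
}

fn main() {
    let mon = ChainMon::default();
    mon.force_updated(42);
    assert_eq!(Manager.process_monitor_events(&mon), 1);
}
```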
@@ -928,7 +949,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                    node_a_ser.0.clear();
                    nodes[0].write(&mut node_a_ser).unwrap();
                }
-               let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a);
+               let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
                nodes[0] = new_node_a;
                monitor_a = new_monitor_a;
            },
@@ -947,7 +968,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                    bc_events.clear();
                    cb_events.clear();
                }
-               let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b);
+               let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
                nodes[1] = new_node_b;
                monitor_b = new_monitor_b;
            },
@@ -961,7 +982,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                    node_c_ser.0.clear();
                    nodes[2].write(&mut node_c_ser).unwrap();
                }
-               let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c);
+               let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
                nodes[2] = new_node_c;
                monitor_c = new_monitor_c;
            },
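Threading the matching fee estimator back through `reload_node!` keeps the reloaded ChannelManager seeing the same feerates as the one it replaced. The underlying reload idiom, write the node out, read it back, swap it in place, in a self-contained sketch (the `Node` type and byte layout here are stand-ins, not LDK's serialization format):

```rust
// Minimal model of the fuzzer's serialize-then-reload step.
#[derive(Debug, PartialEq, Clone)]
struct Node { feerate: u32, payments_sent: u64 }

impl Node {
    fn write(&self, buf: &mut Vec<u8>) {
        buf.extend_from_slice(&self.feerate.to_le_bytes());
        buf.extend_from_slice(&self.payments_sent.to_le_bytes());
    }
    fn read(buf: &[u8]) -> Node {
        let mut f = [0u8; 4]; f.copy_from_slice(&buf[0..4]);
        let mut p = [0u8; 8]; p.copy_from_slice(&buf[4..12]);
        Node { feerate: u32::from_le_bytes(f), payments_sent: u64::from_le_bytes(p) }
    }
}

fn main() {
    let mut nodes = [Node { feerate: 253, payments_sent: 7 }];
    let mut ser = Vec::new();
    nodes[0].write(&mut ser);
    // Reload in place, as the reload arms do with reload_node!.
    let reloaded = Node::read(&ser);
    assert_eq!(reloaded, nodes[0]);
    nodes[0] = reloaded;
}
```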
@@ -1023,26 +1044,57 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
            0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
            0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },
 
+           0x80 => {
+               let max_feerate = last_htlc_clear_fee_a * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+               if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+                   fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release);
+               }
+               nodes[0].maybe_update_chan_fees();
+           },
+           0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },
+
+           0x84 => {
+               let max_feerate = last_htlc_clear_fee_b * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+               if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+                   fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release);
+               }
+               nodes[1].maybe_update_chan_fees();
+           },
+           0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },
+
+           0x88 => {
+               let max_feerate = last_htlc_clear_fee_c * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+               if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+                   fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release);
+               }
+               nodes[2].maybe_update_chan_fees();
+           },
+           0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
+
            0xff => {
                // Test that no channel is in a stuck state where neither party can send funds even
                // after we resolve all pending events.
                // First make sure there are no pending monitor updates, resetting the error state
-               // and calling channel_monitor_updated for each monitor.
-               *monitor_a.update_ret.lock().unwrap() = Ok(());
-               *monitor_b.update_ret.lock().unwrap() = Ok(());
-               *monitor_c.update_ret.lock().unwrap() = Ok(());
+               // and calling force_channel_monitor_updated for each monitor.
+               *monitor_a.persister.update_ret.lock().unwrap() = Ok(());
+               *monitor_b.persister.update_ret.lock().unwrap() = Ok(());
+               *monitor_c.persister.update_ret.lock().unwrap() = Ok(());
 
                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-                   nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+                   monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+                   nodes[0].process_monitor_events();
                }
                if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-                   nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+                   monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+                   nodes[1].process_monitor_events();
                }
                if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-                   nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+                   monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+                   nodes[1].process_monitor_events();
                }
                if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-                   nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+                   monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+                   nodes[2].process_monitor_events();
                }
 
                // Next, make sure peers are all connected to each other
@@ -1078,6 +1130,10 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                assert!(
                    send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) ||
                    send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id));
+
+               last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
+               last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
+               last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire);
            },
            _ => test_return!(),
        }
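The 0x80/0x84/0x88 arms ratchet each node's Normal feerate upward in 250 sat/kW steps but cap it at FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE times the feerate last seen when all HTLCs cleared, which keeps a counterparty from treating the bump as a force-close-worthy fee spike. The capped-ratchet arithmetic on its own (a sketch; `SPIKE_MULTIPLE` stands in for whatever value `lightning::ln::channel` defines):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Illustrative stand-in for lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE.
const SPIKE_MULTIPLE: u64 = 2;

/// Bump `ret_val` by 250 sat/kW, clamped to `last_clear_fee * SPIKE_MULTIPLE`.
fn bump_capped(ret_val: &AtomicU32, last_clear_fee: u32) {
    let max_feerate = last_clear_fee * SPIKE_MULTIPLE as u32;
    // fetch_add returns the previous value; if the bump overshot, clamp back down.
    if ret_val.fetch_add(250, Ordering::AcqRel) + 250 > max_feerate {
        ret_val.store(max_feerate, Ordering::Release);
    }
}

fn main() {
    let fee = AtomicU32::new(253);
    bump_capped(&fee, 253); // 503 <= 506, bump sticks
    assert_eq!(fee.load(Ordering::Acquire), 503);
    bump_capped(&fee, 253); // 753 > 506, clamped to the spike-buffer cap
    assert_eq!(fee.load(Ordering::Acquire), 506);
}
```

The fetch_add-then-store pair is not atomic as a whole, which is fine here because the fuzz harness drives each estimator from a single thread.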