X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=fuzz%2Fsrc%2Fchanmon_consistency.rs;h=e7cd990086163910ebd061f798325ac301ee9950;hb=d015ff250e1bb3e6237e27a179e47b929df8642d;hp=441931d654c4d1a5c4d365571bf28b397897dc05;hpb=63d436570233479c1f2b16103829fdde3213e206;p=rust-lightning diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 441931d6..e7cd9900 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -19,6 +19,7 @@ //! channel being force-closed. use bitcoin::blockdata::block::BlockHeader; +use bitcoin::blockdata::constants::genesis_block; use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::blockdata::script::{Builder, Script}; use bitcoin::blockdata::opcodes; @@ -34,17 +35,18 @@ use lightning::chain::channelmonitor; use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent}; use lightning::chain::transaction::OutPoint; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; -use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys}; -use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, PaymentSendFailure, ChannelManagerReadArgs}; +use lightning::chain::keysinterface::{KeysInterface, InMemorySigner}; +use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, PaymentSendFailure, ChannelManagerReadArgs}; use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; -use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init}; -use lightning::util::enforcing_trait_impls::EnforcingChannelKeys; +use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, ErrorAction, UpdateAddHTLC, Init}; +use lightning::util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER}; use lightning::util::errors::APIError; use lightning::util::events; use lightning::util::logger::Logger; use lightning::util::config::UserConfig; use lightning::util::events::{EventsProvider, MessageSendEventsProvider}; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use lightning::util::test_utils::OnlyReadsKeysInterface; use lightning::routing::router::{Route, RouteHop}; @@ -86,7 +88,7 @@ impl Writer for VecWriter { struct TestChainMonitor { pub logger: Arc, - pub chain_monitor: Arc, Arc, Arc, Arc, Arc>>, + pub chain_monitor: Arc, Arc, Arc, Arc, Arc>>, pub update_ret: Mutex>, // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization // logic will automatically force-close our channels for us (as we don't have an up-to-date @@ -107,12 +109,10 @@ impl TestChainMonitor { } } } -impl chain::Watch for TestChainMonitor { - type Keys = EnforcingChannelKeys; - - fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { +impl chain::Watch for TestChainMonitor { + fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { let mut ser = VecWriter(Vec::new()); - monitor.serialize_for_disk(&mut ser).unwrap(); + monitor.write(&mut ser).unwrap(); if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) { panic!("Already had monitor pre-watch_channel"); } @@ -127,11 +127,11 @@ impl 
chain::Watch for TestChainMonitor { hash_map::Entry::Occupied(entry) => entry, hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"), }; - let mut deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>:: - read(&mut Cursor::new(&map_entry.get().1)).unwrap().1; + let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>:: + read(&mut Cursor::new(&map_entry.get().1), &OnlyReadsKeysInterface {}).unwrap().1; deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap(); let mut ser = VecWriter(Vec::new()); - deserialized_monitor.serialize_for_disk(&mut ser).unwrap(); + deserialized_monitor.write(&mut ser).unwrap(); map_entry.insert((update.update_id, ser.0)); self.should_update_manager.store(true, atomic::Ordering::Relaxed); self.update_ret.lock().unwrap().clone() @@ -145,9 +145,10 @@ impl chain::Watch for TestChainMonitor { struct KeyProvider { node_id: u8, rand_bytes_id: atomic::AtomicU8, + revoked_commitments: Mutex>>>, } impl KeysInterface for KeyProvider { - type ChanKeySigner = EnforcingChannelKeys; + type Signer = EnforcingSigner; fn get_node_secret(&self) -> SecretKey { SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap() @@ -165,25 +166,55 @@ impl KeysInterface for KeyProvider { PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap()) } - fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingChannelKeys { + fn get_channel_signer(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner { let secp_ctx = Secp256k1::signing_only(); - EnforcingChannelKeys::new(InMemoryChannelKeys::new( + let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed); + let keys = InMemorySigner::new( &secp_ctx, SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(), - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id], + [id, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id], channel_value_satoshis, - (0, 0), - )) + [0; 32], + ); + let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed); + EnforcingSigner::new_with_revoked(keys, revoked_commitment, false) } fn get_secure_random_bytes(&self) -> [u8; 32] { let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed); [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id] } + + fn read_chan_signer(&self, buffer: &[u8]) -> Result { + let mut reader = std::io::Cursor::new(buffer); + + let inner: InMemorySigner = Readable::read(&mut reader)?; + let revoked_commitment = 
self.make_revoked_commitment_cell(inner.commitment_seed); + + let last_commitment_number = Readable::read(&mut reader)?; + + Ok(EnforcingSigner { + inner, + last_commitment_number: Arc::new(Mutex::new(last_commitment_number)), + revoked_commitment, + disable_revocation_policy_check: false, + }) + } +} + +impl KeyProvider { + fn make_revoked_commitment_cell(&self, commitment_seed: [u8; 32]) -> Arc> { + let mut revoked_commitments = self.revoked_commitments.lock().unwrap(); + if !revoked_commitments.contains_key(&commitment_seed) { + revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(INITIAL_REVOKED_COMMITMENT_NUMBER))); + } + let cell = revoked_commitments.get(&commitment_seed).unwrap(); + Arc::clone(cell) + } } #[inline] @@ -203,7 +234,7 @@ fn check_api_err(api_err: APIError) { _ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {}, _ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {}, _ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {}, - _ => panic!(err), + _ => panic!("{}", err), } }, APIError::MonitorUpdateFailed => { @@ -227,7 +258,7 @@ fn check_payment_err(send_err: PaymentSendFailure) { } } -type ChanMan = ChannelManager, Arc, Arc, Arc, Arc>; +type ChanMan = ChannelManager, Arc, Arc, Arc, Arc>; #[inline] fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8) -> bool { @@ -283,22 +314,28 @@ pub fn do_test(data: &[u8], out: Out) { let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}))); - let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) }); + let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0), revoked_commitments: Mutex::new(HashMap::new()) }); let mut config = UserConfig::default(); config.channel_options.fee_proportional_millionths = 0; config.channel_options.announced_channel = true; config.peer_channel_config_limits.min_dust_limit_satoshis = 0; - (ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0), - monitor) + let network = Network::Bitcoin; + let params = ChainParameters { + network, + latest_hash: genesis_block(network).block_hash(), + latest_height: 0, + }; + (ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params), + monitor, keys_manager) } } } macro_rules! 
reload_node { - ($ser: expr, $node_id: expr, $old_monitors: expr) => { { + ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { { + let keys_manager = Arc::clone(& $keys_manager); let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}))); - let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) }); let mut config = UserConfig::default(); config.channel_options.fee_proportional_millionths = 0; config.channel_options.announced_channel = true; @@ -307,7 +344,7 @@ pub fn do_test(data: &[u8], out: Out) { let mut monitors = HashMap::new(); let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap(); for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() { - monitors.insert(outpoint, <(BlockHash, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser)).expect("Failed to read monitor").1); + monitors.insert(outpoint, <(BlockHash, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser), &OnlyReadsKeysInterface {}).expect("Failed to read monitor").1); chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser)); } let mut monitor_refs = HashMap::new(); @@ -395,7 +432,8 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! confirm_txn { ($node: expr) => { { - let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + let chain_hash = genesis_block(Network::Bitcoin).block_hash(); + let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); $node.block_connected(&header, &txdata, 1); for i in 2..100 { @@ -435,9 +473,9 @@ pub fn do_test(data: &[u8], out: Out) { // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest // forwarding. - let (node_a, mut monitor_a) = make_node!(0); - let (node_b, mut monitor_b) = make_node!(1); - let (node_c, mut monitor_c) = make_node!(2); + let (node_a, mut monitor_a, keys_manager_a) = make_node!(0); + let (node_b, mut monitor_b, keys_manager_b) = make_node!(1); + let (node_c, mut monitor_c, keys_manager_c) = make_node!(2); let mut nodes = [node_a, node_b, node_c]; @@ -542,7 +580,9 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); new_events } else { Vec::new() }; + let mut had_events = false; for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) { + had_events = true; match event { events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { for dest in nodes.iter() { @@ -599,6 +639,7 @@ pub fn do_test(data: &[u8], out: Out) { _ => panic!("Unhandled message event"), } } + had_events } } } @@ -678,6 +719,7 @@ pub fn do_test(data: &[u8], out: Out) { } else { Ordering::Equal } } else { Ordering::Equal } }); + let had_events = !events.is_empty(); for event in events.drain(..) 
{ match event { events::Event::PaymentReceived { payment_hash, payment_secret, amt } => { @@ -697,6 +739,7 @@ pub fn do_test(data: &[u8], out: Out) { _ => panic!("Unhandled event"), } } + had_events } } } @@ -764,18 +807,18 @@ pub fn do_test(data: &[u8], out: Out) { } }, - 0x10 => process_msg_events!(0, true), - 0x11 => process_msg_events!(0, false), - 0x12 => process_events!(0, true), - 0x13 => process_events!(0, false), - 0x14 => process_msg_events!(1, true), - 0x15 => process_msg_events!(1, false), - 0x16 => process_events!(1, true), - 0x17 => process_events!(1, false), - 0x18 => process_msg_events!(2, true), - 0x19 => process_msg_events!(2, false), - 0x1a => process_events!(2, true), - 0x1b => process_events!(2, false), + 0x10 => { process_msg_events!(0, true); }, + 0x11 => { process_msg_events!(0, false); }, + 0x12 => { process_events!(0, true); }, + 0x13 => { process_events!(0, false); }, + 0x14 => { process_msg_events!(1, true); }, + 0x15 => { process_msg_events!(1, false); }, + 0x16 => { process_events!(1, true); }, + 0x17 => { process_events!(1, false); }, + 0x18 => { process_msg_events!(2, true); }, + 0x19 => { process_msg_events!(2, false); }, + 0x1a => { process_events!(2, true); }, + 0x1b => { process_events!(2, false); }, 0x1c => { if !chan_a_disconnected { @@ -783,7 +826,7 @@ pub fn do_test(data: &[u8], out: Out) { chan_a_disconnected = true; drain_msg_events_on_disconnect!(0); } - let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a); + let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a); nodes[0] = new_node_a; monitor_a = new_monitor_a; }, @@ -800,7 +843,7 @@ pub fn do_test(data: &[u8], out: Out) { nodes[2].get_and_clear_pending_msg_events(); bc_events.clear(); } - let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b); + let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b); nodes[1] = new_node_b; monitor_b = new_monitor_b; }, @@ -810,7 +853,7 @@ pub fn do_test(data: &[u8], out: Out) { chan_b_disconnected = true; drain_msg_events_on_disconnect!(2); } - let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c); + let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c); nodes[2] = new_node_c; monitor_c = new_monitor_c; }, @@ -875,6 +918,62 @@ pub fn do_test(data: &[u8], out: Out) { 0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); }, 0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); }, + 0xff => { + // Test that no channel is in a stuck state where neither party can send funds even + // after we resolve all pending events. + // First make sure there are no pending monitor updates, resetting the error state + // and calling channel_monitor_updated for each monitor. 
+ *monitor_a.update_ret.lock().unwrap() = Ok(()); + *monitor_b.update_ret.lock().unwrap() = Ok(()); + *monitor_c.update_ret.lock().unwrap() = Ok(()); + + if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) { + nodes[0].channel_monitor_updated(&chan_1_funding, *id); + } + if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) { + nodes[1].channel_monitor_updated(&chan_1_funding, *id); + } + if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) { + nodes[1].channel_monitor_updated(&chan_2_funding, *id); + } + if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) { + nodes[2].channel_monitor_updated(&chan_2_funding, *id); + } + + // Next, make sure peers are all connected to each other + if chan_a_disconnected { + nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() }); + nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::empty() }); + chan_a_disconnected = false; + } + if chan_b_disconnected { + nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::empty() }); + nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() }); + chan_b_disconnected = false; + } + + for i in 0..std::usize::MAX { + if i == 100 { panic!("It may take may iterations to settle the state, but it should not take forever"); } + // Then, make sure any current forwards make their way to their destination + if process_msg_events!(0, false) { continue; } + if process_msg_events!(1, false) { continue; } + if process_msg_events!(2, false) { continue; } + // ...making sure any pending PendingHTLCsForwardable events are handled and + // payments claimed. + if process_events!(0, false) { continue; } + if process_events!(1, false) { continue; } + if process_events!(2, false) { continue; } + break; + } + + // Finally, make sure that at least one end of each channel can make a substantial payment. + assert!( + send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id) || + send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id)); + assert!( + send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) || + send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id)); + }, _ => test_return!(), }
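
The monitor persistence changes above replace `serialize_for_disk` with the generic `write`, and deserialization now goes through `ReadableArgs` with an extra `&OnlyReadsKeysInterface {}` argument. Below is a minimal standalone sketch of that write/read-with-args round trip; the traits and types are toy stand-ins for the rust-lightning ones, not the real API.

use std::io::{self, Cursor, Read, Write};

// Toy stand-ins for lightning::util::ser::{Writeable, ReadableArgs} and
// lightning::util::test_utils::OnlyReadsKeysInterface.
trait Writeable {
    fn write<W: Write>(&self, w: &mut W) -> io::Result<()>;
}
trait ReadableArgs<A>: Sized {
    fn read<R: Read>(r: &mut R, args: A) -> io::Result<Self>;
}

struct OnlyReadsKeys;

struct Monitor {
    latest_update_id: u64,
}

impl Writeable for Monitor {
    fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        // Serialize into any Write sink, e.g. the fuzzer's VecWriter-style buffer.
        w.write_all(&self.latest_update_id.to_be_bytes())
    }
}

impl ReadableArgs<OnlyReadsKeys> for Monitor {
    fn read<R: Read>(r: &mut R, _keys: OnlyReadsKeys) -> io::Result<Self> {
        // The extra argument mirrors passing a keys interface to ChannelMonitor::read.
        let mut buf = [0u8; 8];
        r.read_exact(&mut buf)?;
        Ok(Monitor { latest_update_id: u64::from_be_bytes(buf) })
    }
}

fn main() {
    let monitor = Monitor { latest_update_id: 42 };
    let mut ser = Vec::new();
    monitor.write(&mut ser).unwrap();

    // Read back from the serialized bytes, supplying the keys-interface argument.
    let read_back = Monitor::read(&mut Cursor::new(&ser), OnlyReadsKeys).unwrap();
    assert_eq!(read_back.latest_update_id, 42);
}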
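
The new `revoked_commitments` map on `KeyProvider` exists so that a signer rebuilt by `read_chan_signer` shares its revoked-commitment counter with the signer originally handed out for the same commitment seed. The get-or-create shared-cell pattern behind `make_revoked_commitment_cell` boils down to the following standalone sketch; the initial-counter constant is an assumed stand-in for `INITIAL_REVOKED_COMMITMENT_NUMBER`.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Assumed stand-in value; the real constant lives in
// lightning::util::enforcing_trait_impls.
const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;

struct KeyProvider {
    // One shared counter cell per commitment seed.
    revoked_commitments: Mutex<HashMap<[u8; 32], Arc<Mutex<u64>>>>,
}

impl KeyProvider {
    fn make_revoked_commitment_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<u64>> {
        let mut map = self.revoked_commitments.lock().unwrap();
        // Every caller with the same seed gets a clone of the same Arc, so the
        // revocation state survives a signer serialize/deserialize round trip.
        Arc::clone(map.entry(commitment_seed)
            .or_insert_with(|| Arc::new(Mutex::new(INITIAL_REVOKED_COMMITMENT_NUMBER))))
    }
}

fn main() {
    let kp = KeyProvider { revoked_commitments: Mutex::new(HashMap::new()) };
    let a = kp.make_revoked_commitment_cell([7; 32]);
    let b = kp.make_revoked_commitment_cell([7; 32]);
    // Both handles point at the same counter cell.
    assert!(Arc::ptr_eq(&a, &b));
}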
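
The `0xff` case depends on `process_msg_events!`/`process_events!` now reporting whether any event was handled: it keeps making passes over all three nodes until a full pass finds nothing left, bounded at 100 passes, and only then asserts that payments still succeed. A self-contained sketch of that bounded settle loop (not the fuzzer's actual macros):

// Keep draining event sources until one full pass produces no work, and panic
// if that takes an unreasonable number of passes.
fn settle<F: FnMut(usize) -> bool>(num_sources: usize, mut process: F) {
    for i in 0..usize::MAX {
        if i == 100 {
            panic!("It may take many iterations to settle the state, but it should not take forever");
        }
        // `process(src)` returns true if it handled at least one event, mirroring
        // the boolean now returned by process_msg_events!/process_events!.
        // Like the original, a pass restarts as soon as any source had work.
        if (0..num_sources).any(|src| process(src)) { continue; }
        break;
    }
}

fn main() {
    // Toy usage: three "nodes", each with a fixed amount of pending work.
    let mut pending = [3usize, 1, 4];
    settle(pending.len(), |src| {
        if pending[src] > 0 { pending[src] -= 1; true } else { false }
    });
    assert!(pending.iter().all(|&p| p == 0));
}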