// This file is Copyright its original authors, visible in version control
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.
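//!
//! The interesting entry points are `do_test`, which drives a single fuzz case, and the public
//! wrappers `chanmon_consistency_test` and `chanmon_consistency_run` at the bottom of this file.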
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;

use bitcoin::hashes::Hash as TraitImport;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, WPubkeyHash};

use lightning::chain;
use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::keysinterface::{KeyMaterial, KeysInterface, InMemorySigner, Recipient};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::ln::script::ShutdownScript;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::errors::APIError;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::events::MessageSendEventsProvider;
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning::routing::router::{Route, RouteHop};

use utils::test_logger::{self, Output};
use utils::test_persister::TestPersister;

use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use bitcoin::secp256k1::Secp256k1;

use std::mem;
use std::cmp::{self, Ordering};
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc, Mutex};
use std::sync::atomic;
use std::io::Cursor;

use bitcoin::bech32::u5;
const MAX_FEE: u32 = 10_000;
struct FuzzEstimator {
	ret_val: atomic::AtomicU32,
impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
		// We force-close channels if our counterparty sends us a feerate which is a small multiple
		// of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
		// always return a HighPriority feerate here which is >= the maximum Normal feerate and a
		// Background feerate which is <= the minimum Normal feerate.
			ConfirmationTarget::HighPriority => MAX_FEE,
			ConfirmationTarget::Background => 253,
			ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),

pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transaction(&self, _tx: &Transaction) { }
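// A simple in-memory `Writer` used to serialize ChannelManagers and ChannelMonitors so they can
// later be reloaded from bytes by the restart/reload paths below.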
pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);

struct TestChainMonitor {
	pub logger: Arc<dyn Logger>,
	pub keys: Arc<KeyProvider>,
	pub persister: Arc<TestPersister>,
	pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
	// logic will automatically force-close our channels for us (as we don't have an up-to-date
	// monitor implying we are not able to punish misbehaving counterparties). Because this test
	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
	// fully-serialized monitor state here, as well as the corresponding update_id.
	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
	pub should_update_manager: atomic::AtomicBool,
impl TestChainMonitor {
	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))),
			latest_monitors: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),
impl chain::Watch<EnforcingSigner> for TestChainMonitor {
	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
		let mut ser = VecWriter(Vec::new());
		monitor.write(&mut ser).unwrap();
		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
			panic!("Already had monitor pre-watch_channel");
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		self.chain_monitor.watch_channel(funding_txo, monitor)
	fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
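		// Apply the update to a copy deserialized from the last-persisted bytes so that
		// `latest_monitors` always holds a fully-updated serialized monitor (and its update_id),
		// mirroring what an external persister would have stored.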
		let mut map_lock = self.latest_monitors.lock().unwrap();
		let mut map_entry = match map_lock.entry(funding_txo) {
			hash_map::Entry::Occupied(entry) => entry,
			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
			read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
		deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
		let mut ser = VecWriter(Vec::new());
		deserialized_monitor.write(&mut ser).unwrap();
		map_entry.insert((update.update_id, ser.0));
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		self.chain_monitor.update_channel(funding_txo, update)
	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
		return self.chain_monitor.release_pending_monitor_events();

	rand_bytes_id: atomic::AtomicU32,
	enforcement_states: Mutex<HashMap<[u8;32], Arc<Mutex<EnforcementState>>>>,
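// Every key handed out by this provider is derived deterministically from the node's one-byte id,
// so a given fuzz input always produces the same keys and signatures.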
impl KeysInterface for KeyProvider {
	type Signer = EnforcingSigner;

	fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> {
		Ok(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap())

	fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
		let mut node_secret = self.get_node_secret(recipient)?;
		if let Some(tweak) = tweak {
			node_secret.mul_assign(tweak).map_err(|_| ())?;
		Ok(SharedSecret::new(other_key, &node_secret))

	fn get_inbound_payment_key_material(&self) -> KeyMaterial {
		KeyMaterial([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id])

	fn get_destination_script(&self) -> Script {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
		let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()

	fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
		let secp_ctx = Secp256k1::signing_only();
		let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap();
		let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
		ShutdownScript::new_p2wpkh(&pubkey_hash)

	fn get_channel_signer(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner {
		let secp_ctx = Secp256k1::signing_only();
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
		let keys = InMemorySigner::new(
			self.get_node_secret(Recipient::Node).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
			[id as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
			channel_value_satoshis,
		let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
		EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)

	fn get_secure_random_bytes(&self) -> [u8; 32] {
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
		let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_id];
		res[30-4..30].copy_from_slice(&id.to_le_bytes());

	fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
		let mut reader = std::io::Cursor::new(buffer);
		let inner: InMemorySigner = ReadableArgs::read(&mut reader, self.get_node_secret(Recipient::Node).unwrap())?;
		let state = self.make_enforcement_state_cell(inner.commitment_seed);
			disable_revocation_policy_check: false,

	fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result<RecoverableSignature, ()> {

	fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
		let mut revoked_commitments = self.enforcement_states.lock().unwrap();
		if !revoked_commitments.contains_key(&commitment_seed) {
			revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new())));
		let cell = revoked_commitments.get(&commitment_seed).unwrap();
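// Check that a failed payment-send only failed for a reason this test explicitly allows; anything
// else indicates the kind of channel-state divergence this fuzzer exists to catch.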
fn check_api_err(api_err: APIError) {
		APIError::APIMisuseError { .. } => panic!("We can't misuse the API"),
		APIError::FeeRateTooHigh { .. } => panic!("We can't send too much fee?"),
		APIError::RouteError { .. } => panic!("Our routes should work"),
		APIError::ChannelUnavailable { err } => {
			// Test the error against a list of errors we can hit, and reject
			// all others. If you hit this panic, the list of acceptable errors
			// is probably just stale and you should add new messages here.
				"Peer for first hop currently disconnected/pending monitor update!" => {},
				_ if err.starts_with("Cannot push more than their max accepted HTLCs ") => {},
				_ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept ") => {},
				_ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
				_ if err.starts_with("Cannot send value that would put counterparty balance under holder-announced channel reserve value") => {},
				_ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
				_ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
				_ if err.starts_with("Cannot send value that would put our exposure to dust HTLCs at") => {},
				_ => panic!("{}", err),
		APIError::MonitorUpdateFailed => {
			// We can (obviously) temp-fail a monitor update
		APIError::IncompatibleShutdownScript { .. } => panic!("Cannot send an incompatible shutdown script"),

fn check_payment_err(send_err: PaymentSendFailure) {
		PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err),
		PaymentSendFailure::PathParameterError(per_path_results) => {
			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
		PaymentSendFailure::AllFailedRetrySafe(per_path_results) => {
			for api_err in per_path_results { check_api_err(api_err); }
		PaymentSendFailure::PartialFailure { results, .. } => {
			for res in results { if let Err(api_err) = res { check_api_err(api_err); } }

type ChanMan = ChannelManager<EnforcingSigner, Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;

fn get_payment_secret_hash(dest: &ChanMan, payment_id: &mut u8) -> Option<(PaymentSecret, PaymentHash)> {
	let mut payment_hash;
		payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).into_inner());
		if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600) {
			return Some((payment_secret, payment_hash));
		*payment_id = payment_id.wrapping_add(1);
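// Attempt a one-hop payment from `source` to `dest` over the given channel. Any send error is
// filtered through check_payment_err; the return value reports whether a payment was initiated.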
fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8) -> bool {
	let (payment_secret, payment_hash) =
		if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
	if let Err(err) = source.send_payment(&Route {
		paths: vec![vec![RouteHop {
			pubkey: dest.get_our_node_id(),
			node_features: NodeFeatures::known(),
			short_channel_id: dest_chan_id,
			channel_features: ChannelFeatures::known(),
			cltv_expiry_delta: 200,
		payment_params: None,
	}, payment_hash, &Some(payment_secret)) {
		check_payment_err(err);

fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8) -> bool {
	let (payment_secret, payment_hash) =
		if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
	if let Err(err) = source.send_payment(&Route {
		paths: vec![vec![RouteHop {
			pubkey: middle.get_our_node_id(),
			node_features: NodeFeatures::known(),
			short_channel_id: middle_chan_id,
			channel_features: ChannelFeatures::known(),
			cltv_expiry_delta: 100,
			pubkey: dest.get_our_node_id(),
			node_features: NodeFeatures::known(),
			short_channel_id: dest_chan_id,
			channel_features: ChannelFeatures::known(),
			cltv_expiry_delta: 200,
		payment_params: None,
	}, payment_hash, &Some(payment_secret)) {
		check_payment_err(err);
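// The top-level fuzz case: build three nodes connected 0 <-> 1 <-> 2, then interpret each input
// byte as an action (fail/restore monitor updates, deliver messages, disconnect/reconnect peers,
// reload nodes from their serialized state, or send payments of various sizes).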
pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
	let out = SearchingOutput::new(underlying_out);
	let broadcast = Arc::new(TestBroadcaster{});
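	// Build a fresh node: deterministic KeyProvider, a TestChainMonitor wrapping a real
	// ChainMonitor, and a ChannelManager configured with zero forwarding fees and announced channels.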
	macro_rules! make_node {
		($node_id: expr, $fee_estimator: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
				Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));

			let mut config = UserConfig::default();
			config.channel_config.forwarding_fee_proportional_millionths = 0;
			config.channel_handshake_config.announced_channel = true;
			let network = Network::Bitcoin;
			let params = ChainParameters {
				best_block: BestBlock::from_genesis(network),
			(ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
			monitor, keys_manager)
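	// Simulate a node restart: deserialize the ChannelManager and every ChannelMonitor from their
	// last serialized copies, then hand the monitors to a brand-new TestChainMonitor.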
	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
			let keys_manager = Arc::clone(& $keys_manager);
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
				Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));

			let mut config = UserConfig::default();
			config.channel_config.forwarding_fee_proportional_millionths = 0;
			config.channel_handshake_config.announced_channel = true;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &*$keys_manager).expect("Failed to read monitor").1);
				chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);

			let read_args = ChannelManagerReadArgs {
				fee_estimator: $fee_estimator.clone(),
				chain_monitor: chain_monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				default_config: config,
				channel_monitors: monitor_refs,

			let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
			for (funding_txo, mon) in monitors.drain() {
				assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());

	let mut channel_txn = Vec::new();
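	// Open a channel between two nodes by walking the full open_channel / accept_channel /
	// funding_created / funding_signed handshake, stashing the funding tx for later confirmation.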
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
			$dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });

			$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
				} else { panic!("Wrong event type"); }

			$dest.handle_open_channel(&$source.get_our_node_id(), InitFeatures::known(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
				} else { panic!("Wrong event type"); }

			$source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::known(), &accept_channel);
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
					let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					funding_output = OutPoint { txid: tx.txid(), index: 0 };
					$source.funding_transaction_generated(&temporary_channel_id, &$dest.get_our_node_id(), tx.clone()).unwrap();
					channel_txn.push(tx);
				} else { panic!("Wrong event type"); }

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
				} else { panic!("Wrong event type"); }
			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
				} else { panic!("Wrong event type"); }
			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);
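	// Confirm the funding transactions and then enough blocks on top that both channels are
	// fully locked in.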
	macro_rules! confirm_txn {
			let chain_hash = genesis_block(Network::Bitcoin).block_hash();
			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
			$node.transactions_confirmed(&header, &txdata, 1);
				header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			$node.best_block_updated(&header, 99);
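	// Deliver channel_ready both ways (and swallow the resulting announcement_signatures) so the
	// channels become usable for payments.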
	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg);
					} else { panic!("Wrong event type"); }

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }

	let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
	let mut last_htlc_clear_fee_a =  253;
	let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
	let mut last_htlc_clear_fee_b = 253;
	let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
	let mut last_htlc_clear_fee_c = 253;

	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a);
	let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b);
	let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c);

	let mut nodes = [node_a, node_b, node_c];

	let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
	let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {
	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id: u8 = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ab_events = Vec::new();
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();
	let mut cb_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();

	macro_rules! test_return {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);

	let mut read_pos = 0;
	macro_rules! get_slice {
			let slice_len = $len as usize;
			if data.len() < read_pos + slice_len {
			read_pos += slice_len;
			&data[read_pos - slice_len..read_pos]

	// Push any events from Node B onto ba_events and bc_events
	macro_rules! push_excess_b_events {
		($excess_events: expr, $expect_drop_node: expr) => { {
			let a_id = nodes[0].get_our_node_id();
			let expect_drop_node: Option<usize> = $expect_drop_node;
			let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None };
			for event in $excess_events {
				let push_a = match event {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
					events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
					events::MessageSendEvent::SendChannelReady { .. } => continue,
					events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
					events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
						assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
					_ => panic!("Unhandled message event {:?}", event),
				if push_a { ba_events.push(event); } else { bc_events.push(event); }
	// While delivering messages, we select across three possible message selection processes
	// to ensure we get as much coverage as possible. See the individual enum variants for more
	// detail.
	enum ProcessMessages {
		/// Deliver all available messages, including fetching any new messages from
		/// `get_and_clear_pending_msg_events()` (which may have side effects).
		AllMessages,
		/// Call `get_and_clear_pending_msg_events()` first, and then deliver up to one
		/// message (which may already be queued).
		OneMessage,
		/// Deliver up to one already-queued message. This avoids any potential side-effects
		/// of `get_and_clear_pending_msg_events()` (eg freeing the HTLC holding cell), which
		/// provides potentially more coverage.
		OnePendingMessage,
	}
	macro_rules! process_msg_events {
		($node: expr, $corrupt_forward: expr, $limit_events: expr) => { {
			let mut events = if $node == 1 {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut ba_events);
				new_events.extend_from_slice(&bc_events[..]);
			} else if $node == 0 {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut ab_events);
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut cb_events);
			let mut new_events = Vec::new();
			if $limit_events != ProcessMessages::OnePendingMessage {
				new_events = nodes[$node].get_and_clear_pending_msg_events();
			let mut had_events = false;
			let mut events_iter = events.drain(..).chain(new_events.drain(..));
			let mut extra_ev = None;
			for event in &mut events_iter {
					events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
						for (idx, dest) in nodes.iter().enumerate() {
							if dest.get_our_node_id() == node_id {
								for update_add in update_add_htlcs.iter() {
									out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
									if !$corrupt_forward {
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
										// Corrupt the update_add_htlc message so that its HMAC
										// check will fail and we generate an
										// update_fail_malformed_htlc instead of an
										// update_fail_htlc as we do when we reject a payment.
										let mut msg_ser = update_add.encode();
										msg_ser[1000] ^= 0xff;
										let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
								for update_fulfill in update_fulfill_htlcs.iter() {
									out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
									dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
								for update_fail in update_fail_htlcs.iter() {
									out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
									dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
								for update_fail_malformed in update_fail_malformed_htlcs.iter() {
									out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
									dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
								if let Some(msg) = update_fee {
									out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
									dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
								let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
									!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
								if $limit_events != ProcessMessages::AllMessages && processed_change {
									// If we only want to process some messages, don't deliver the CS until later.
									extra_ev = Some(events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate {
										update_add_htlcs: Vec::new(),
										update_fail_htlcs: Vec::new(),
										update_fulfill_htlcs: Vec::new(),
										update_fail_malformed_htlcs: Vec::new(),
								out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
								dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						for (idx, dest) in nodes.iter().enumerate() {
							if dest.get_our_node_id() == *node_id {
								out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
								dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
					events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
						for (idx, dest) in nodes.iter().enumerate() {
							if dest.get_our_node_id() == *node_id {
								out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
								dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
					events::MessageSendEvent::SendChannelReady { .. } => {
						// Can be generated as a reestablish response
					events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
						// Can be generated as a reestablish response
					events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
						// When we reconnect we will resend a channel_update to make sure our
						// counterparty has the latest parameters for receiving payments
						// through us. We do, however, check that the message does not include
						// the "disabled" bit, as we should never ever have a channel which is
						// disabled when we send such an update (or it may indicate channel
						// force-close which we should detect as an error).
						assert_eq!(msg.contents.flags & 2, 0);
					_ => if out.may_fail.load(atomic::Ordering::Acquire) {
						panic!("Unhandled message event {:?}", event)
				if $limit_events != ProcessMessages::AllMessages {
				push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None);
			} else if $node == 0 {
				if let Some(ev) = extra_ev { ab_events.push(ev); }
				for event in events_iter { ab_events.push(event); }
				if let Some(ev) = extra_ev { cb_events.push(ev); }
				for event in events_iter { cb_events.push(event); }
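	// When a channel's counterparty disconnects, drain and discard the messages that were queued
	// for it (asserting only droppable message types were pending), and flush node B's
	// cross-channel queues via push_excess_b_events!.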
	macro_rules! drain_msg_events_on_disconnect {
		($counterparty_id: expr) => { {
			if $counterparty_id == 0 {
				for event in nodes[0].get_and_clear_pending_msg_events() {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendChannelReady { .. } => {},
						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
						events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
							assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
						_ => if out.may_fail.load(atomic::Ordering::Acquire) {
							panic!("Unhandled message event")
				push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
				for event in nodes[2].get_and_clear_pending_msg_events() {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendChannelReady { .. } => {},
						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
						events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
							assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
						_ => if out.may_fail.load(atomic::Ordering::Acquire) {
							panic!("Unhandled message event")
				push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
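	// Process a node's pending Events: claim or fail incoming payments (deduplicating by hash,
	// since only 256 distinct preimages exist) and run any pending HTLC forwards.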
	macro_rules! process_events {
		($node: expr, $fail: expr) => { {
			// In case we get 256 payments we may have a hash collision, resulting in the
			// second claim/fail call not finding the duplicate-hash HTLC, so we have to
			// deduplicate the calls here.
			let mut claim_set = HashSet::new();
			let mut events = nodes[$node].get_and_clear_pending_events();
			// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
			// case where we first process a PendingHTLCsForwardable, then claim/fail on a
			// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
			// PaymentReceived event for the second HTLC in our pending_events (and breaking
			// our claim_set deduplication).
			events.sort_by(|a, b| {
				if let events::Event::PaymentReceived { .. } = a {
					if let events::Event::PendingHTLCsForwardable { .. } = b {
					} else { Ordering::Equal }
				} else if let events::Event::PendingHTLCsForwardable { .. } = a {
					if let events::Event::PaymentReceived { .. } = b {
					} else { Ordering::Equal }
				} else { Ordering::Equal }
			let had_events = !events.is_empty();
			for event in events.drain(..) {
					events::Event::PaymentReceived { payment_hash, .. } => {
						if claim_set.insert(payment_hash.0) {
								nodes[$node].fail_htlc_backwards(&payment_hash);
								nodes[$node].claim_funds(PaymentPreimage(payment_hash.0));
					events::Event::PaymentSent { .. } => {},
					events::Event::PaymentClaimed { .. } => {},
					events::Event::PaymentPathSuccessful { .. } => {},
					events::Event::PaymentPathFailed { .. } => {},
					events::Event::PaymentForwarded { .. } if $node == 1 => {},
					events::Event::PendingHTLCsForwardable { .. } => {
						nodes[$node].process_pending_htlc_forwards();
					_ => if out.may_fail.load(atomic::Ordering::Acquire) {
						panic!("Unhandled event")
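	// Main fuzz loop: read one byte per iteration and dispatch it as an action below.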
		let v = get_slice!(1)[0];
		out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes());
		// In general, we keep related message groups close together in binary form, allowing
		// bit-twiddling mutations to have similar effects. This is probably overkill, but no
		// harm in doing so.
			0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
			0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
			0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),

				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[0].process_monitor_events();
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[1].process_monitor_events();
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[1].process_monitor_events();
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[2].process_monitor_events();

				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					chan_a_disconnected = false;
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					chan_b_disconnected = false;

			0x10 => { process_msg_events!(0, true, ProcessMessages::AllMessages); },
			0x11 => { process_msg_events!(0, false, ProcessMessages::AllMessages); },
			0x12 => { process_msg_events!(0, true, ProcessMessages::OneMessage); },
			0x13 => { process_msg_events!(0, false, ProcessMessages::OneMessage); },
			0x14 => { process_msg_events!(0, true, ProcessMessages::OnePendingMessage); },
			0x15 => { process_msg_events!(0, false, ProcessMessages::OnePendingMessage); },

			0x16 => { process_events!(0, true); },
			0x17 => { process_events!(0, false); },

			0x18 => { process_msg_events!(1, true, ProcessMessages::AllMessages); },
			0x19 => { process_msg_events!(1, false, ProcessMessages::AllMessages); },
			0x1a => { process_msg_events!(1, true, ProcessMessages::OneMessage); },
			0x1b => { process_msg_events!(1, false, ProcessMessages::OneMessage); },
			0x1c => { process_msg_events!(1, true, ProcessMessages::OnePendingMessage); },
			0x1d => { process_msg_events!(1, false, ProcessMessages::OnePendingMessage); },

			0x1e => { process_events!(1, true); },
			0x1f => { process_events!(1, false); },

			0x20 => { process_msg_events!(2, true, ProcessMessages::AllMessages); },
			0x21 => { process_msg_events!(2, false, ProcessMessages::AllMessages); },
			0x22 => { process_msg_events!(2, true, ProcessMessages::OneMessage); },
			0x23 => { process_msg_events!(2, false, ProcessMessages::OneMessage); },
			0x24 => { process_msg_events!(2, true, ProcessMessages::OnePendingMessage); },
			0x25 => { process_msg_events!(2, false, ProcessMessages::OnePendingMessage); },

			0x26 => { process_events!(2, true); },
			0x27 => { process_events!(2, false); },
				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
					node_a_ser.0.clear();
					nodes[0].write(&mut node_a_ser).unwrap();
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
				nodes[0] = new_node_a;
				monitor_a = new_monitor_a;
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();
				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();
				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
				nodes[1] = new_node_b;
				monitor_b = new_monitor_b;
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
					node_c_ser.0.clear();
					nodes[2].write(&mut node_c_ser).unwrap();
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
				nodes[2] = new_node_c;
				monitor_c = new_monitor_c;

			// 1/10th the channel size:
			0x30 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id); },
			0x31 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id); },
			0x32 => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id); },
			0x33 => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id); },
			0x34 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut payment_id); },
			0x35 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut payment_id); },

			0x38 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut payment_id); },
			0x39 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut payment_id); },
			0x3a => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut payment_id); },
			0x3b => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut payment_id); },
			0x3c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut payment_id); },
			0x3d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut payment_id); },

			0x40 => { send_payment(&nodes[0], &nodes[1], chan_a, 100_000, &mut payment_id); },
			0x41 => { send_payment(&nodes[1], &nodes[0], chan_a, 100_000, &mut payment_id); },
			0x42 => { send_payment(&nodes[1], &nodes[2], chan_b, 100_000, &mut payment_id); },
			0x43 => { send_payment(&nodes[2], &nodes[1], chan_b, 100_000, &mut payment_id); },
			0x44 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut payment_id); },
			0x45 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut payment_id); },

			0x48 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000, &mut payment_id); },
			0x49 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000, &mut payment_id); },
			0x4a => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000, &mut payment_id); },
			0x4b => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000, &mut payment_id); },
			0x4c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut payment_id); },
			0x4d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut payment_id); },

			0x50 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000, &mut payment_id); },
			0x51 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000, &mut payment_id); },
			0x52 => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000, &mut payment_id); },
			0x53 => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000, &mut payment_id); },
			0x54 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut payment_id); },
			0x55 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut payment_id); },

			0x58 => { send_payment(&nodes[0], &nodes[1], chan_a, 100, &mut payment_id); },
			0x59 => { send_payment(&nodes[1], &nodes[0], chan_a, 100, &mut payment_id); },
			0x5a => { send_payment(&nodes[1], &nodes[2], chan_b, 100, &mut payment_id); },
			0x5b => { send_payment(&nodes[2], &nodes[1], chan_b, 100, &mut payment_id); },
			0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut payment_id); },
			0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut payment_id); },

			0x60 => { send_payment(&nodes[0], &nodes[1], chan_a, 10, &mut payment_id); },
			0x61 => { send_payment(&nodes[1], &nodes[0], chan_a, 10, &mut payment_id); },
			0x62 => { send_payment(&nodes[1], &nodes[2], chan_b, 10, &mut payment_id); },
			0x63 => { send_payment(&nodes[2], &nodes[1], chan_b, 10, &mut payment_id); },
			0x64 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut payment_id); },
			0x65 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut payment_id); },

			0x68 => { send_payment(&nodes[0], &nodes[1], chan_a, 1, &mut payment_id); },
			0x69 => { send_payment(&nodes[1], &nodes[0], chan_a, 1, &mut payment_id); },
			0x6a => { send_payment(&nodes[1], &nodes[2], chan_b, 1, &mut payment_id); },
			0x6b => { send_payment(&nodes[2], &nodes[1], chan_b, 1, &mut payment_id); },
			0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
			0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },
				let max_feerate = last_htlc_clear_fee_a * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
				if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
					fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release);
				nodes[0].maybe_update_chan_fees();
			0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },

				let max_feerate = last_htlc_clear_fee_b * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
				if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
					fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release);
				nodes[1].maybe_update_chan_fees();
			0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },

				let max_feerate = last_htlc_clear_fee_c * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
				if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
					fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release);
				nodes[2].maybe_update_chan_fees();
			0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },

				// Test that no channel is in a stuck state where neither party can send funds even
				// after we resolve all pending events.
				// First make sure there are no pending monitor updates, resetting the error state
				// and calling force_channel_monitor_updated for each monitor.
				*monitor_a.persister.update_ret.lock().unwrap() = Ok(());
				*monitor_b.persister.update_ret.lock().unwrap() = Ok(());
				*monitor_c.persister.update_ret.lock().unwrap() = Ok(());

				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[0].process_monitor_events();
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[1].process_monitor_events();
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[1].process_monitor_events();
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[2].process_monitor_events();

				// Next, make sure peers are all connected to each other
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					chan_a_disconnected = false;
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
					chan_b_disconnected = false;

				for i in 0..std::usize::MAX {
					if i == 100 { panic!("It may take many iterations to settle the state, but it should not take forever"); }
					// Then, make sure any current forwards make their way to their destination
					if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; }
					if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; }
					if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; }
					// ...making sure any pending PendingHTLCsForwardable events are handled and
					// payments claimed.
					if process_events!(0, false) { continue; }
					if process_events!(1, false) { continue; }
					if process_events!(2, false) { continue; }

				// Finally, make sure that at least one end of each channel can make a substantial payment
					send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id) ||
					send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id));
					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) ||
					send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id));

				last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
				last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
				last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire);

			_ => test_return!(),
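		// Refresh the serialized copies after every action so a later reload_node! restores the
		// most recent ChannelManager state alongside its latest monitors.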
		node_a_ser.0.clear();
		nodes[0].write(&mut node_a_ser).unwrap();
		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_b_ser.0.clear();
		nodes[1].write(&mut node_b_ser).unwrap();
		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_c_ser.0.clear();
		nodes[2].write(&mut node_c_ser).unwrap();
		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
/// We actually have different behavior based on whether a certain log string has been seen, so we
/// have to do a bit more tracking.
struct SearchingOutput<O: Output> {
	may_fail: Arc<atomic::AtomicBool>,
impl<O: Output> Output for SearchingOutput<O> {
	fn locked_write(&self, data: &[u8]) {
		// We hit a design limitation of the LN state machine (see CONCURRENT_INBOUND_HTLC_FEE_BUFFER)
		if std::str::from_utf8(data).unwrap().contains("Outbound update_fee HTLC buffer overflow - counterparty should force-close this channel") {
			self.may_fail.store(true, atomic::Ordering::Release);
		self.output.locked_write(data)
impl<O: Output> SearchingOutput<O> {
	pub fn new(output: O) -> Self {
		Self { output, may_fail: Arc::new(atomic::AtomicBool::new(false)) }
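// Public entry points: `chanmon_consistency_test` for harnesses that provide their own `Output`,
// plus an extern "C" wrapper that takes a raw byte pointer and length.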
pub fn chanmon_consistency_test<Out: Output>(data: &[u8], out: Out) {

pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{});