// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.
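//!
//! For example, the input byte 0x30 makes node A send a payment to node B, 0x0c disconnects
//! nodes A and B, and 0xff runs a final nothing-is-stuck consistency check; see the `match`
//! on the command byte in `do_test` below for the full table.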

use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::locktime::PackedLockTime;
use bitcoin::network::constants::Network;

use bitcoin::hashes::Hash as TraitImport;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{BlockHash, WPubkeyHash};

use lightning::chain;
use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
use lightning::events;
use lightning::events::MessageSendEventsProvider;
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::ln::script::ShutdownScript;
use lightning::ln::functional_test_utils::*;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::errors::APIError;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RouteParameters, Router};

use crate::utils::test_logger::{self, Output};
use crate::utils::test_persister::TestPersister;

use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};

use std::mem;
use std::cmp::{self, Ordering};
use hashbrown::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;

use bitcoin::bech32::u5;

const MAX_FEE: u32 = 10_000;

struct FuzzEstimator {
	ret_val: atomic::AtomicU32,
}

impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
		// We force-close channels if our counterparty sends us a feerate which is a small multiple
		// of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
		// always return a HighPriority feerate here which is >= the maximum Normal feerate and a
		// Background feerate which is <= the minimum Normal feerate.
		match conf_target {
			ConfirmationTarget::HighPriority => MAX_FEE,
			ConfirmationTarget::Background => 253,
			ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
		}
	}
}
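
// Note: 253 sat/kW is the feerate floor used throughout this fuzzer; it is (roughly) the
// per-weight-unit equivalent of Bitcoin Core's 1 sat/vbyte minimum relay feerate.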

struct FuzzRouter {}

impl Router for FuzzRouter {
	fn find_route(
		&self, _payer: &PublicKey, _params: &RouteParameters, _first_hops: Option<&[&ChannelDetails]>,
		_inflight_htlcs: &InFlightHtlcs
	) -> Result<Route, msgs::LightningError> {
		Err(msgs::LightningError {
			err: String::from("Not implemented"),
			action: msgs::ErrorAction::IgnoreError
		})
	}
}
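
// The fuzzer never routes through the `Router` trait: `send_payment` and `send_hop_payment`
// below construct their one- and two-hop `Route`s by hand, so `find_route` only needs to
// return an ignorable error.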

pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transactions(&self, _txs: &[&Transaction]) { }
}

pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
}

struct TestChainMonitor {
	pub logger: Arc<dyn Logger>,
	pub keys: Arc<KeyProvider>,
	pub persister: Arc<TestPersister>,
	pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
	// logic will automatically force-close our channels for us (as we don't have an up-to-date
	// monitor implying we are not able to punish misbehaving counterparties). Because this test
	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
	// fully-serialized monitor state here, as well as the corresponding update_id.
	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
	pub should_update_manager: atomic::AtomicBool,
}
impl TestChainMonitor {
	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
		Self {
			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))),
			logger,
			keys,
			persister,
			latest_monitors: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),
		}
	}
}
impl chain::Watch<EnforcingSigner> for TestChainMonitor {
	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
		let mut ser = VecWriter(Vec::new());
		monitor.write(&mut ser).unwrap();
		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
			panic!("Already had monitor pre-watch_channel");
		}
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		self.chain_monitor.watch_channel(funding_txo, monitor)
	}

	fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
		let mut map_lock = self.latest_monitors.lock().unwrap();
		let mut map_entry = match map_lock.entry(funding_txo) {
			hash_map::Entry::Occupied(entry) => entry,
			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
		};
		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
			read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
		deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
		let mut ser = VecWriter(Vec::new());
		deserialized_monitor.write(&mut ser).unwrap();
		map_entry.insert((update.update_id, ser.0));
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		self.chain_monitor.update_channel(funding_txo, update)
	}

	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
		return self.chain_monitor.release_pending_monitor_events();
	}
}
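
// Note that update_channel above applies each update to a freshly-deserialized copy of the
// latest persisted monitor and re-serializes the result, so every single update round-trips
// ChannelMonitor serialization rather than mutating a long-lived in-memory monitor.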

struct KeyProvider {
	node_secret: SecretKey,
	rand_bytes_id: atomic::AtomicU32,
	enforcement_states: Mutex<HashMap<[u8;32], Arc<Mutex<EnforcementState>>>>,
}

impl EntropySource for KeyProvider {
	fn get_secure_random_bytes(&self) -> [u8; 32] {
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
		let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_secret[31]];
		res[30-4..30].copy_from_slice(&id.to_le_bytes());
		res
	}
}
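
// For example, on the node whose `node_secret` ends in 0x01, the sixth call returns 26 zero
// bytes, the little-endian counter value 5 in bytes 26..30, the tag byte 11, and 0x01:
// deterministic but unique per call, so "randomness" is reproducible across node reloads.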

impl NodeSigner for KeyProvider {
	fn get_node_id(&self, recipient: Recipient) -> Result<PublicKey, ()> {
		let node_secret = match recipient {
			Recipient::Node => Ok(&self.node_secret),
			Recipient::PhantomNode => Err(())
		}?;
		Ok(PublicKey::from_secret_key(&Secp256k1::signing_only(), node_secret))
	}

	fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
		let mut node_secret = match recipient {
			Recipient::Node => Ok(self.node_secret.clone()),
			Recipient::PhantomNode => Err(())
		}?;
		if let Some(tweak) = tweak {
			node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
		}
		Ok(SharedSecret::new(other_key, &node_secret))
	}

	fn get_inbound_payment_key_material(&self) -> KeyMaterial {
		KeyMaterial([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_secret[31]])
	}

	fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result<RecoverableSignature, ()> {
		unreachable!()
	}

	fn sign_gossip_message(&self, msg: lightning::ln::msgs::UnsignedGossipMessage) -> Result<Signature, ()> {
		let msg_hash = Message::from_slice(&Sha256dHash::hash(&msg.encode()[..])[..]).map_err(|_| ())?;
		let secp_ctx = Secp256k1::signing_only();
		Ok(secp_ctx.sign_ecdsa(&msg_hash, &self.node_secret))
	}
}

impl SignerProvider for KeyProvider {
	type Signer = EnforcingSigner;

	fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8;
		[id; 32]
	}

	fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::Signer {
		let secp_ctx = Secp256k1::signing_only();
		let id = channel_keys_id[0];
		let keys = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_secret[31]]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_secret[31]]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_secret[31]]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_secret[31]]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_secret[31]]).unwrap(),
			[id, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_secret[31]],
			channel_value_satoshis,
			channel_keys_id,
			channel_keys_id,
		);
		let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
		EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
	}

	fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
		let mut reader = std::io::Cursor::new(buffer);

		let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
		let state = self.make_enforcement_state_cell(inner.commitment_seed);
		Ok(EnforcingSigner {
			inner,
			state,
			disable_revocation_policy_check: false,
		})
	}

	fn get_destination_script(&self) -> Result<Script, ()> {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_secret[31]]).unwrap();
		let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script())
	}

	fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
		let secp_ctx = Secp256k1::signing_only();
		let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_secret[31]]).unwrap();
		let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
		Ok(ShutdownScript::new_p2wpkh(&pubkey_hash))
	}
}

impl KeyProvider {
	fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
		let mut revoked_commitments = self.enforcement_states.lock().unwrap();
		if !revoked_commitments.contains_key(&commitment_seed) {
			revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new())));
		}
		let cell = revoked_commitments.get(&commitment_seed).unwrap();
		Arc::clone(cell)
	}
}
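
// Keying enforcement state by commitment_seed means a signer re-read via read_chan_signer
// shares its revocation-enforcement state with the original signer, so EnforcingSigner's
// policy checks keep working across serialization round-trips and node reloads.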

fn check_api_err(api_err: APIError, sendable_bounds_violated: bool) {
	match api_err {
		APIError::APIMisuseError { .. } => panic!("We can't misuse the API"),
		APIError::FeeRateTooHigh { .. } => panic!("We can't send too much fee?"),
		APIError::InvalidRoute { .. } => panic!("Our routes should work"),
		APIError::ChannelUnavailable { err } => {
			// Test the error against a list of errors we can hit, and reject
			// all others. If you hit this panic, the list of acceptable errors
			// is probably just stale and you should add new messages here.
			match err.as_str() {
				"Peer for first hop currently disconnected" => {},
				_ if err.starts_with("Cannot send less than our next-HTLC minimum - ") => {},
				_ if err.starts_with("Cannot send more than our next-HTLC maximum - ") => {},
				_ => panic!("{}", err),
			}
			assert!(sendable_bounds_violated);
		},
		APIError::MonitorUpdateInProgress => {
			// We can (obviously) temp-fail a monitor update
		},
		APIError::IncompatibleShutdownScript { .. } => panic!("Cannot send an incompatible shutdown script"),
	}
}
fn check_payment_err(send_err: PaymentSendFailure, sendable_bounds_violated: bool) {
	match send_err {
		PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err, sendable_bounds_violated),
		PaymentSendFailure::PathParameterError(per_path_results) => {
			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
		},
		PaymentSendFailure::AllFailedResendSafe(per_path_results) => {
			for api_err in per_path_results { check_api_err(api_err, sendable_bounds_violated); }
		},
		PaymentSendFailure::PartialFailure { results, .. } => {
			for res in results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
		},
		PaymentSendFailure::DuplicatePayment => panic!(),
	}
}
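
// Both helpers thread through `sendable_bounds_violated` so that a ChannelUnavailable error
// is only ever accepted when the attempted amount really was outside the channel's
// advertised next-HTLC bounds at send time.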

type ChanMan<'a> = ChannelManager<Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<FuzzEstimator>, &'a FuzzRouter, Arc<dyn Logger>>;

#[inline]
fn get_payment_secret_hash(dest: &ChanMan, payment_id: &mut u8) -> Option<(PaymentSecret, PaymentHash)> {
	let mut payment_hash;
	for _ in 0..256 {
		payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).into_inner());
		if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) {
			return Some((payment_secret, payment_hash));
		}
		*payment_id = payment_id.wrapping_add(1);
	}
	None
}
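
// Payment hashes are derived from the single-byte `payment_id`, so at most 256 distinct
// hashes ever exist; `process_events!` below deduplicates claim/fail calls for exactly
// this reason.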

#[inline]
fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool {
	let (payment_secret, payment_hash) =
		if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
	let mut payment_id = [0; 32];
	payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
	*payment_idx += 1;
	let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
		.iter().find(|chan| chan.short_channel_id == Some(dest_chan_id))
		.map(|chan|
			(chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
		.unwrap_or((0, 0));
	if let Err(err) = source.send_payment_with_route(&Route {
		paths: vec![Path { hops: vec![RouteHop {
			pubkey: dest.get_our_node_id(),
			node_features: dest.node_features(),
			short_channel_id: dest_chan_id,
			channel_features: dest.channel_features(),
			fee_msat: amt,
			cltv_expiry_delta: 200,
		}], blinded_tail: None }],
		payment_params: None,
	}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
		check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
		false
	} else {
		// Note that while the max is a strict upper-bound, we can occasionally send substantially
		// below the minimum, with some gap which is unusable immediately below the minimum. Thus,
		// we don't check against min_value_sendable here.
		assert!(amt <= max_value_sendable);
		true
	}
}

#[inline]
fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool {
	let (payment_secret, payment_hash) =
		if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
	let mut payment_id = [0; 32];
	payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
	*payment_idx += 1;
	let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
		.iter().find(|chan| chan.short_channel_id == Some(middle_chan_id))
		.map(|chan|
			(chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
		.unwrap_or((0, 0));
	let first_hop_fee = 50_000;
	if let Err(err) = source.send_payment_with_route(&Route {
		paths: vec![Path { hops: vec![RouteHop {
			pubkey: middle.get_our_node_id(),
			node_features: middle.node_features(),
			short_channel_id: middle_chan_id,
			channel_features: middle.channel_features(),
			fee_msat: first_hop_fee,
			cltv_expiry_delta: 100,
		}, RouteHop {
			pubkey: dest.get_our_node_id(),
			node_features: dest.node_features(),
			short_channel_id: dest_chan_id,
			channel_features: dest.channel_features(),
			fee_msat: amt,
			cltv_expiry_delta: 200,
		}], blinded_tail: None }],
		payment_params: None,
	}, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
		let sent_amt = amt + first_hop_fee;
		check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
		false
	} else {
		// Note that while the max is a strict upper-bound, we can occasionally send substantially
		// below the minimum, with some gap which is unusable immediately below the minimum. Thus,
		// we don't check against min_value_sendable here.
		assert!(amt + first_hop_fee <= max_value_sendable);
		true
	}
}
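
// The middle hop is paid a flat 50_000 msat fee. With `forwarding_fee_proportional_millionths`
// set to 0 in the node config below, this should comfortably cover node B's forwarding fee,
// leaving liquidity and HTLC bounds as the expected reasons a two-hop payment can fail.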

pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
	let out = SearchingOutput::new(underlying_out);
	let broadcast = Arc::new(TestBroadcaster{});
	let router = FuzzRouter {};

	macro_rules! make_node {
		($node_id: expr, $fee_estimator: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let node_secret = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, $node_id]).unwrap();
			let keys_manager = Arc::new(KeyProvider { node_secret, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
				Arc::new(TestPersister {
					update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
				}), Arc::clone(&keys_manager)));

			let mut config = UserConfig::default();
			config.channel_config.forwarding_fee_proportional_millionths = 0;
			config.channel_handshake_config.announced_channel = true;
			let network = Network::Bitcoin;
			let params = ChainParameters {
				network,
				best_block: BestBlock::from_network(network),
			};
			(ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), &router, Arc::clone(&logger), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), config, params),
			monitor, keys_manager)
		} }
	}

	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
			let keys_manager = Arc::clone(& $keys_manager);
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
				Arc::new(TestPersister {
					update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
				}), Arc::clone(& $keys_manager)));

			let mut config = UserConfig::default();
			config.channel_config.forwarding_fee_proportional_millionths = 0;
			config.channel_handshake_config.announced_channel = true;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
				chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
			}
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);
			}

			let read_args = ChannelManagerReadArgs {
				entropy_source: keys_manager.clone(),
				node_signer: keys_manager.clone(),
				signer_provider: keys_manager.clone(),
				fee_estimator: $fee_estimator.clone(),
				chain_monitor: chain_monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				router: &router,
				logger,
				default_config: config,
				channel_monitors: monitor_refs,
			};

			let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
			for (funding_txo, mon) in monitors.drain() {
				assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
					ChannelMonitorUpdateStatus::Completed);
			}
			res
		} }
	}

	let mut channel_txn = Vec::new();
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }, true).unwrap();
			$dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }, false).unwrap();
			$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
			let open_channel = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$dest.handle_open_channel(&$source.get_our_node_id(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$source.handle_accept_channel(&$dest.get_our_node_id(), &accept_channel);
			let funding_output;
			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
					let tx = Transaction { version: $chan_id, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					funding_output = OutPoint { txid: tx.txid(), index: 0 };
					$source.funding_transaction_generated(&temporary_channel_id, &$dest.get_our_node_id(), tx.clone()).unwrap();
					channel_txn.push(tx);
				} else { panic!("Wrong event type"); }
			}

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			let events = $dest.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] {
				assert_eq!(counterparty_node_id, &$source.get_our_node_id());
			} else { panic!("Wrong event type"); }

			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);
			let events = $source.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] {
				assert_eq!(counterparty_node_id, &$dest.get_our_node_id());
			} else { panic!("Wrong event type"); }

			funding_output
		} }
	}

	macro_rules! confirm_txn {
		($node: expr) => { {
			let chain_hash = genesis_block(Network::Bitcoin).block_hash();
			let mut header = create_dummy_header(chain_hash, 42);
			let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
			$node.transactions_confirmed(&header, &txdata, 1);
			for _ in 2..100 {
				header = create_dummy_header(header.block_hash(), 42);
			}
			$node.best_block_updated(&header, 99);
		} }
	}

	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			}
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg);
							}
						}
					} else { panic!("Wrong event type"); }
				}
			}

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }
				}
			}
		} }
	}

	let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
	let mut last_htlc_clear_fee_a = 253;
	let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
	let mut last_htlc_clear_fee_b = 253;
	let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
	let mut last_htlc_clear_fee_c = 253;

	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	// forwarding.
	let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a);
	let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b);
	let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c);

	let mut nodes = [node_a, node_b, node_c];

	let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
	let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {
		confirm_txn!(node);
	}

	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id: u8 = 0;
	let mut payment_idx: u64 = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ab_events = Vec::new();
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();
	let mut cb_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();

	macro_rules! test_return {
		() => { {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);
			return;
		} }
	}

	let mut read_pos = 0;
	macro_rules! get_slice {
		($len: expr) => {
			{
				let slice_len = $len as usize;
				if data.len() < read_pos + slice_len {
					return;
				}
				read_pos += slice_len;
				&data[read_pos - slice_len..read_pos]
			}
		}
	}

	// Push any events from Node B onto ba_events and bc_events
	macro_rules! push_excess_b_events {
		($excess_events: expr, $expect_drop_node: expr) => { {
			let a_id = nodes[0].get_our_node_id();
			let expect_drop_node: Option<usize> = $expect_drop_node;
			let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None };
			for event in $excess_events {
				let push_a = match event {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
						*node_id == a_id
					},
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
						*node_id == a_id
					},
					events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
						*node_id == a_id
					},
					events::MessageSendEvent::SendChannelReady { .. } => continue,
					events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
					events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
						assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
						if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
						*node_id == a_id
					},
					_ => panic!("Unhandled message event {:?}", event),
				};
				if push_a { ba_events.push(event); } else { bc_events.push(event); }
			}
		} }
	}

	// While delivering messages, we select across three possible message selection processes
	// to ensure we get as much coverage as possible. See the individual enum variants for more
	// details.
	#[derive(PartialEq)]
	enum ProcessMessages {
		/// Deliver all available messages, including fetching any new messages from
		/// `get_and_clear_pending_msg_events()` (which may have side effects).
		AllMessages,
		/// Call `get_and_clear_pending_msg_events()` first, and then deliver up to one
		/// message (which may already be queued).
		OneMessage,
		/// Deliver up to one already-queued message. This avoids any potential side-effects
		/// of `get_and_clear_pending_msg_events()` (eg freeing the HTLC holding cell), which
		/// provides potentially more coverage.
		OnePendingMessage,
	}
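
	// `OneMessage` and `OnePendingMessage` differ only in whether `get_and_clear_pending_msg_events()`
	// runs before delivery, letting the fuzzer explore both orderings of queue-drain and delivery.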

	macro_rules! process_msg_events {
		($node: expr, $corrupt_forward: expr, $limit_events: expr) => { {
			let mut events = if $node == 1 {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut ba_events);
				new_events.extend_from_slice(&bc_events[..]);
				bc_events.clear();
				new_events
			} else if $node == 0 {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut ab_events);
				new_events
			} else {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut cb_events);
				new_events
			};
			let mut new_events = Vec::new();
			if $limit_events != ProcessMessages::OnePendingMessage {
				new_events = nodes[$node].get_and_clear_pending_msg_events();
			}
			let mut had_events = false;
			let mut events_iter = events.drain(..).chain(new_events.drain(..));
			let mut extra_ev = None;
			for event in &mut events_iter {
				had_events = true;
				match event {
					events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
						for (idx, dest) in nodes.iter().enumerate() {
							if dest.get_our_node_id() == node_id {
								for update_add in update_add_htlcs.iter() {
									out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
									if !$corrupt_forward {
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
									} else {
										// Corrupt the update_add_htlc message so that its HMAC
										// check will fail and we generate a
										// update_fail_malformed_htlc instead of an
										// update_fail_htlc as we do when we reject a payment.
										let mut msg_ser = update_add.encode();
										msg_ser[1000] ^= 0xff;
										let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
									}
								}
								for update_fulfill in update_fulfill_htlcs.iter() {
									out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
									dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
								}
								for update_fail in update_fail_htlcs.iter() {
									out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
									dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
								}
								for update_fail_malformed in update_fail_malformed_htlcs.iter() {
									out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
									dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
								}
								if let Some(msg) = update_fee {
									out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
									dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
								}
								let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
									!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
								if $limit_events != ProcessMessages::AllMessages && processed_change {
									// If we only want to process some messages, don't deliver the CS until later.
									extra_ev = Some(events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate {
										update_add_htlcs: Vec::new(),
										update_fail_htlcs: Vec::new(),
										update_fulfill_htlcs: Vec::new(),
										update_fail_malformed_htlcs: Vec::new(),
										update_fee: None,
										commitment_signed
									} });
									break;
								}
								out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
								dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
								break;
							}
						}
					},
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						for (idx, dest) in nodes.iter().enumerate() {
							if dest.get_our_node_id() == *node_id {
								out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
								dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
							}
						}
					},
					events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
						for (idx, dest) in nodes.iter().enumerate() {
							if dest.get_our_node_id() == *node_id {
								out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
								dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
							}
						}
					},
					events::MessageSendEvent::SendChannelReady { .. } => {
						// Can be generated as a reestablish response
					},
					events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
						// Can be generated as a reestablish response
					},
					events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
						// When we reconnect we will resend a channel_update to make sure our
						// counterparty has the latest parameters for receiving payments
						// through us. We do, however, check that the message does not include
						// the "disabled" bit, as we should never ever have a channel which is
						// disabled when we send such an update (or it may indicate channel
						// force-close which we should detect as an error).
						assert_eq!(msg.contents.flags & 2, 0);
					},
					_ => if out.may_fail.load(atomic::Ordering::Acquire) {
						return;
					} else {
						panic!("Unhandled message event {:?}", event)
					},
				}
				if $limit_events != ProcessMessages::AllMessages {
					break;
				}
			}
			if $node == 1 {
				push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None);
			} else if $node == 0 {
				if let Some(ev) = extra_ev { ab_events.push(ev); }
				for event in events_iter { ab_events.push(event); }
			} else {
				if let Some(ev) = extra_ev { cb_events.push(ev); }
				for event in events_iter { cb_events.push(event); }
			}
			had_events
		} }
	}

	macro_rules! drain_msg_events_on_disconnect {
		($counterparty_id: expr) => { {
			if $counterparty_id == 0 {
				for event in nodes[0].get_and_clear_pending_msg_events() {
					match event {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendChannelReady { .. } => {},
						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
						events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
							assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
						},
						_ => if out.may_fail.load(atomic::Ordering::Acquire) {
							return;
						} else {
							panic!("Unhandled message event")
						},
					}
				}
				push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
				ab_events.clear();
				ba_events.clear();
			} else {
				for event in nodes[2].get_and_clear_pending_msg_events() {
					match event {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendChannelReady { .. } => {},
						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
						events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
							assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
						},
						_ => if out.may_fail.load(atomic::Ordering::Acquire) {
							return;
						} else {
							panic!("Unhandled message event")
						},
					}
				}
				push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
				bc_events.clear();
				cb_events.clear();
			}
		} }
	}

	macro_rules! process_events {
		($node: expr, $fail: expr) => { {
			// In case we get 256 payments we may have a hash collision, resulting in the
			// second claim/fail call not finding the duplicate-hash HTLC, so we have to
			// deduplicate the calls here.
			let mut claim_set = HashSet::new();
			let mut events = nodes[$node].get_and_clear_pending_events();
			// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
			// case where we first process a PendingHTLCsForwardable, then claim/fail on a
			// PaymentClaimable, claiming/failing two HTLCs, but leaving a just-generated
			// PaymentClaimable event for the second HTLC in our pending_events (and breaking
			// our claim_set deduplication).
			events.sort_by(|a, b| {
				if let events::Event::PaymentClaimable { .. } = a {
					if let events::Event::PendingHTLCsForwardable { .. } = b {
						Ordering::Less
					} else { Ordering::Equal }
				} else if let events::Event::PendingHTLCsForwardable { .. } = a {
					if let events::Event::PaymentClaimable { .. } = b {
						Ordering::Greater
					} else { Ordering::Equal }
				} else { Ordering::Equal }
			});
			let had_events = !events.is_empty();
			for event in events.drain(..) {
				match event {
					events::Event::PaymentClaimable { payment_hash, .. } => {
						if claim_set.insert(payment_hash.0) {
							if $fail {
								nodes[$node].fail_htlc_backwards(&payment_hash);
							} else {
								nodes[$node].claim_funds(PaymentPreimage(payment_hash.0));
							}
						}
					},
					events::Event::PaymentSent { .. } => {},
					events::Event::PaymentClaimed { .. } => {},
					events::Event::PaymentPathSuccessful { .. } => {},
					events::Event::PaymentPathFailed { .. } => {},
					events::Event::PaymentFailed { .. } => {},
					events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. } => {
						// Even though we don't explicitly send probes, because probes are
						// detected based on hashing the payment hash+preimage, it's rather
						// trivial for the fuzzer to build payments that accidentally end up
						// looking like probes.
					},
					events::Event::PaymentForwarded { .. } if $node == 1 => {},
					events::Event::ChannelReady { .. } => {},
					events::Event::PendingHTLCsForwardable { .. } => {
						nodes[$node].process_pending_htlc_forwards();
					},
					events::Event::HTLCHandlingFailed { .. } => {},
					_ => if out.may_fail.load(atomic::Ordering::Acquire) {
						return;
					} else {
						panic!("Unhandled event")
					},
				}
			}
			had_events
		} }
	}

	loop {
		let v = get_slice!(1)[0];
		out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes());

		// In general, we keep related message groups close together in binary form, allowing
		// bit-twiddling mutations to have similar effects. This is probably overkill, but no
		// harm in doing so.
		match v {
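			// Rough command-byte layout (any byte not listed hits the `_ => test_return!()`
			// arm below and ends the run): 0x00-0x06 set per-node monitor update results,
			// 0x08-0x0b complete pending monitor updates, 0x0c-0x0f disconnect/reconnect
			// peers, 0x10-0x27 process messages and events per node, 0x2c-0x2e reload nodes
			// from their serialized state, 0x30-0x6d send payments of various sizes,
			// 0x80-0x89 adjust feerates, and 0xff runs the final consistency check.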
			0x00 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
			0x01 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
			0x02 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
			0x04 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
			0x05 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
			0x06 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,

			0x08 => {
				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[0].process_monitor_events();
				}
			},
			0x09 => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[1].process_monitor_events();
				}
			},
			0x0a => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[1].process_monitor_events();
				}
			},
			0x0b => {
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[2].process_monitor_events();
				}
			},

			0x0c => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
			},
			0x0d => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
			},
			0x0e => {
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
					chan_a_disconnected = false;
				}
			},
			0x0f => {
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
					chan_b_disconnected = false;
				}
			},

			0x10 => { process_msg_events!(0, true, ProcessMessages::AllMessages); },
			0x11 => { process_msg_events!(0, false, ProcessMessages::AllMessages); },
			0x12 => { process_msg_events!(0, true, ProcessMessages::OneMessage); },
			0x13 => { process_msg_events!(0, false, ProcessMessages::OneMessage); },
			0x14 => { process_msg_events!(0, true, ProcessMessages::OnePendingMessage); },
			0x15 => { process_msg_events!(0, false, ProcessMessages::OnePendingMessage); },

			0x16 => { process_events!(0, true); },
			0x17 => { process_events!(0, false); },

			0x18 => { process_msg_events!(1, true, ProcessMessages::AllMessages); },
			0x19 => { process_msg_events!(1, false, ProcessMessages::AllMessages); },
			0x1a => { process_msg_events!(1, true, ProcessMessages::OneMessage); },
			0x1b => { process_msg_events!(1, false, ProcessMessages::OneMessage); },
			0x1c => { process_msg_events!(1, true, ProcessMessages::OnePendingMessage); },
			0x1d => { process_msg_events!(1, false, ProcessMessages::OnePendingMessage); },

			0x1e => { process_events!(1, true); },
			0x1f => { process_events!(1, false); },

			0x20 => { process_msg_events!(2, true, ProcessMessages::AllMessages); },
			0x21 => { process_msg_events!(2, false, ProcessMessages::AllMessages); },
			0x22 => { process_msg_events!(2, true, ProcessMessages::OneMessage); },
			0x23 => { process_msg_events!(2, false, ProcessMessages::OneMessage); },
			0x24 => { process_msg_events!(2, true, ProcessMessages::OnePendingMessage); },
			0x25 => { process_msg_events!(2, false, ProcessMessages::OnePendingMessage); },

			0x26 => { process_events!(2, true); },
			0x27 => { process_events!(2, false); },

			0x2c => {
				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
				if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
					node_a_ser.0.clear();
					nodes[0].write(&mut node_a_ser).unwrap();
				}
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
				nodes[0] = new_node_a;
				monitor_a = new_monitor_a;
			},
			0x2d => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();
					ab_events.clear();
					ba_events.clear();
				}
				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();
					bc_events.clear();
					cb_events.clear();
				}
				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
				nodes[1] = new_node_b;
				monitor_b = new_monitor_b;
			},
			0x2e => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
				if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
					node_c_ser.0.clear();
					nodes[2].write(&mut node_c_ser).unwrap();
				}
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
				nodes[2] = new_node_c;
				monitor_c = new_monitor_c;
			},

			// 1/10th the channel size:
			0x30 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id, &mut payment_idx); },
			0x31 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx); },
			0x32 => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx); },
			0x33 => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id, &mut payment_idx); },
			0x34 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx); },
			0x35 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx); },

			0x38 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut payment_id, &mut payment_idx); },
			0x39 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut payment_id, &mut payment_idx); },
			0x3a => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut payment_id, &mut payment_idx); },
			0x3b => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut payment_id, &mut payment_idx); },
			0x3c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut payment_id, &mut payment_idx); },
			0x3d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut payment_id, &mut payment_idx); },

			0x40 => { send_payment(&nodes[0], &nodes[1], chan_a, 100_000, &mut payment_id, &mut payment_idx); },
			0x41 => { send_payment(&nodes[1], &nodes[0], chan_a, 100_000, &mut payment_id, &mut payment_idx); },
			0x42 => { send_payment(&nodes[1], &nodes[2], chan_b, 100_000, &mut payment_id, &mut payment_idx); },
			0x43 => { send_payment(&nodes[2], &nodes[1], chan_b, 100_000, &mut payment_id, &mut payment_idx); },
			0x44 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut payment_id, &mut payment_idx); },
			0x45 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut payment_id, &mut payment_idx); },

			0x48 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000, &mut payment_id, &mut payment_idx); },
			0x49 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000, &mut payment_id, &mut payment_idx); },
			0x4a => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000, &mut payment_id, &mut payment_idx); },
			0x4b => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000, &mut payment_id, &mut payment_idx); },
			0x4c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut payment_id, &mut payment_idx); },
			0x4d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut payment_id, &mut payment_idx); },

			0x50 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000, &mut payment_id, &mut payment_idx); },
			0x51 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000, &mut payment_id, &mut payment_idx); },
			0x52 => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000, &mut payment_id, &mut payment_idx); },
			0x53 => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000, &mut payment_id, &mut payment_idx); },
			0x54 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut payment_id, &mut payment_idx); },
			0x55 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut payment_id, &mut payment_idx); },

			0x58 => { send_payment(&nodes[0], &nodes[1], chan_a, 100, &mut payment_id, &mut payment_idx); },
			0x59 => { send_payment(&nodes[1], &nodes[0], chan_a, 100, &mut payment_id, &mut payment_idx); },
			0x5a => { send_payment(&nodes[1], &nodes[2], chan_b, 100, &mut payment_id, &mut payment_idx); },
			0x5b => { send_payment(&nodes[2], &nodes[1], chan_b, 100, &mut payment_id, &mut payment_idx); },
			0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut payment_id, &mut payment_idx); },
			0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut payment_id, &mut payment_idx); },

			0x60 => { send_payment(&nodes[0], &nodes[1], chan_a, 10, &mut payment_id, &mut payment_idx); },
			0x61 => { send_payment(&nodes[1], &nodes[0], chan_a, 10, &mut payment_id, &mut payment_idx); },
			0x62 => { send_payment(&nodes[1], &nodes[2], chan_b, 10, &mut payment_id, &mut payment_idx); },
			0x63 => { send_payment(&nodes[2], &nodes[1], chan_b, 10, &mut payment_id, &mut payment_idx); },
			0x64 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut payment_id, &mut payment_idx); },
			0x65 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut payment_id, &mut payment_idx); },

			0x68 => { send_payment(&nodes[0], &nodes[1], chan_a, 1, &mut payment_id, &mut payment_idx); },
			0x69 => { send_payment(&nodes[1], &nodes[0], chan_a, 1, &mut payment_id, &mut payment_idx); },
			0x6a => { send_payment(&nodes[1], &nodes[2], chan_b, 1, &mut payment_id, &mut payment_idx); },
			0x6b => { send_payment(&nodes[2], &nodes[1], chan_b, 1, &mut payment_id, &mut payment_idx); },
			0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id, &mut payment_idx); },
			0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id, &mut payment_idx); },

			0x80 => {
				let max_feerate = last_htlc_clear_fee_a * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
				if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
					fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release);
				}
				nodes[0].maybe_update_chan_fees();
			},
			0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },

			0x84 => {
				let max_feerate = last_htlc_clear_fee_b * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
				if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
					fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release);
				}
				nodes[1].maybe_update_chan_fees();
			},
			0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },

			0x88 => {
				let max_feerate = last_htlc_clear_fee_c * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
				if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
					fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release);
				}
				nodes[2].maybe_update_chan_fees();
			},
			0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
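
			// Feerate bumps above are capped at FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE times
			// the feerate recorded the last time all HTLCs were cleared (in the 0xff arm), so
			// an update_fee should never exceed what the counterparty's fee-spike buffer can
			// absorb and trigger a force-close.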

			0xff => {
				// Test that no channel is in a stuck state where neither party can send funds even
				// after we resolve all pending events.
				// First make sure there are no pending monitor updates, resetting the error state
				// and calling force_channel_monitor_updated for each monitor.
				*monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
				*monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
				*monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;

				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[0].process_monitor_events();
				}
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
					nodes[1].process_monitor_events();
				}
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[1].process_monitor_events();
				}
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
					nodes[2].process_monitor_events();
				}

				// Next, make sure peers are all connected to each other
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
					chan_a_disconnected = false;
				}
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
					chan_b_disconnected = false;
				}

				for i in 0..std::usize::MAX {
					if i == 100 { panic!("It may take many iterations to settle the state, but it should not take forever"); }
					// Then, make sure any current forwards make their way to their destination
					if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; }
					if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; }
					if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; }
					// ...making sure any pending PendingHTLCsForwardable events are handled and
					// payments claimed.
					if process_events!(0, false) { continue; }
					if process_events!(1, false) { continue; }
					if process_events!(2, false) { continue; }
					break;
				}

				// Finally, make sure that at least one end of each channel can make a substantial payment
				assert!(
					send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id, &mut payment_idx) ||
					send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx));
				assert!(
					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx) ||
					send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id, &mut payment_idx));

				last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
				last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
				last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire);
			},
			_ => test_return!(),
		}

		node_a_ser.0.clear();
		nodes[0].write(&mut node_a_ser).unwrap();
		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_b_ser.0.clear();
		nodes[1].write(&mut node_b_ser).unwrap();
		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_c_ser.0.clear();
		nodes[2].write(&mut node_c_ser).unwrap();
		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
	}
}

/// We actually have different behavior based on whether a certain log string has been seen, so we
/// have to do a bit more tracking.
#[derive(Clone)]
struct SearchingOutput<O: Output> {
	output: O,
	may_fail: Arc<atomic::AtomicBool>,
}
impl<O: Output> Output for SearchingOutput<O> {
	fn locked_write(&self, data: &[u8]) {
		// We hit a design limitation of the LN state machine (see CONCURRENT_INBOUND_HTLC_FEE_BUFFER)
		if std::str::from_utf8(data).unwrap().contains("Outbound update_fee HTLC buffer overflow - counterparty should force-close this channel") {
			self.may_fail.store(true, atomic::Ordering::Release);
		}
		self.output.locked_write(data)
	}
}
impl<O: Output> SearchingOutput<O> {
	pub fn new(output: O) -> Self {
		Self { output, may_fail: Arc::new(atomic::AtomicBool::new(false)) }
	}
}

pub fn chanmon_consistency_test<Out: Output>(data: &[u8], out: Out) {
	do_test(data, out);
}

#[no_mangle]
pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{});
}