1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 //! Test that monitor update failures don't get our channel state out of sync.
11 //! One of the biggest concerns with the monitor update failure handling code is that messages
12 //! resent after monitor updating is restored are delivered out-of-order, resulting in
13 //! commitment_signed messages having "invalid signatures".
14 //! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
15 //! actions such as sending payments, handling events, or changing monitor update return values on
16 //! a per-node basis. This should allow it to find any cases where the ordering of actions results
17 //! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
18 //! send-side handling is correct, other peers. We consider it a failure if any action results in a
19 //! channel being force-closed.
21 use bitcoin::amount::Amount;
22 use bitcoin::blockdata::constants::genesis_block;
23 use bitcoin::blockdata::locktime::absolute::LockTime;
24 use bitcoin::blockdata::opcodes;
25 use bitcoin::blockdata::script::{Builder, ScriptBuf};
26 use bitcoin::blockdata::transaction::{Transaction, TxOut};
27 use bitcoin::network::Network;
28 use bitcoin::transaction::Version;
30 use bitcoin::hash_types::BlockHash;
31 use bitcoin::hashes::sha256::Hash as Sha256;
32 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
33 use bitcoin::hashes::Hash as TraitImport;
34 use bitcoin::WPubkeyHash;
36 use lightning::blinded_path::payment::ReceiveTlvs;
37 use lightning::blinded_path::BlindedPath;
39 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
40 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
41 use lightning::chain::transaction::OutPoint;
42 use lightning::chain::{
43 chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch,
45 use lightning::events;
46 use lightning::events::MessageSendEventsProvider;
47 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
48 use lightning::ln::channel_state::ChannelDetails;
49 use lightning::ln::channelmanager::{
50 ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure,
53 use lightning::ln::functional_test_utils::*;
54 use lightning::ln::msgs::{
55 self, ChannelMessageHandler, CommitmentUpdate, DecodeError, Init, UpdateAddHTLC,
57 use lightning::ln::script::ShutdownScript;
58 use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
59 use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
60 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
61 use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
62 use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RouteParameters, Router};
63 use lightning::sign::{
64 EntropySource, InMemorySigner, KeyMaterial, NodeSigner, Recipient, SignerProvider,
66 use lightning::util::config::UserConfig;
67 use lightning::util::errors::APIError;
68 use lightning::util::hash_tables::*;
69 use lightning::util::logger::Logger;
70 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
71 use lightning::util::test_channel_signer::{EnforcementState, TestChannelSigner};
73 use crate::utils::test_logger::{self, Output};
74 use crate::utils::test_persister::TestPersister;
76 use bitcoin::secp256k1::ecdh::SharedSecret;
77 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
78 use bitcoin::secp256k1::schnorr;
79 use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey};
82 use std::cmp::{self, Ordering};
85 use std::sync::atomic;
86 use std::sync::{Arc, Mutex};
// Upper bound on any feerate this fuzzer will report, in sats-per-1000-weight.
88 const MAX_FEE: u32 = 10_000;
// Fee estimator whose variable return value is driven by the fuzz input: the driver stores a
// feerate into `ret_val` and the estimator reports it (clamped to `MAX_FEE`) for the
// `NonAnchorChannelFee` target. See the `FeeEstimator` impl below.
89 struct FuzzEstimator {
90 ret_val: atomic::AtomicU32,
92 impl FeeEstimator for FuzzEstimator {
93 fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
94 // We force-close channels if our counterparty sends us a feerate which is a small multiple
95 // of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
96 // always return a HighPriority feerate here which is >= the maximum Normal feerate and a
97 // Background feerate which is <= the minimum Normal feerate.
// Sweeps always use the maximum feerate the fuzzer will ever report.
99 ConfirmationTarget::OnChainSweep => MAX_FEE,
// These low-urgency / minimum-allowed targets are pinned to 253 sat/kW, the
// smallest feerate any arm of this estimator can return.
100 ConfirmationTarget::ChannelCloseMinimum
101 | ConfirmationTarget::AnchorChannelFee
102 | ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
103 | ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
104 | ConfirmationTarget::OutputSpendingFee => 253,
// The only fuzz-controlled arm: report the driver-selected feerate, capped at MAX_FEE
// so it never exceeds the OnChainSweep estimate above.
105 ConfirmationTarget::NonAnchorChannelFee => {
106 cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE)
// The fuzzer builds its `Route`s by hand (see the send helpers below), so the router never
// needs to find one: `find_route` always fails with an ignorable error.
114 impl Router for FuzzRouter {
116 &self, _payer: &PublicKey, _params: &RouteParameters,
117 _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs,
118 ) -> Result<Route, msgs::LightningError> {
119 Err(msgs::LightningError {
120 err: String::from("Not implemented"),
121 action: msgs::ErrorAction::IgnoreError,
// Blinded payment paths are likewise unused by this fuzz target; the body (not visible
// here) is presumably a stub — TODO confirm against the full source.
125 fn create_blinded_payment_paths<T: secp256k1::Signing + secp256k1::Verification>(
126 &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
127 _amount_msats: u64, _secp_ctx: &Secp256k1<T>,
128 ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
// Onion-message routing is not exercised by this fuzz target; both methods' bodies (not
// visible in this excerpt) are presumably stubs — TODO confirm against the full source.
133 impl MessageRouter for FuzzRouter {
135 &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination,
136 ) -> Result<OnionMessagePath, ()> {
140 fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
141 &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
142 ) -> Result<Vec<BlindedPath>, ()> {
// Broadcaster that silently drops all transactions: this fuzz target never mines broadcasts,
// it only cares about off-chain channel state staying consistent.
147 pub struct TestBroadcaster {}
148 impl BroadcasterInterface for TestBroadcaster {
149 fn broadcast_transactions(&self, _txs: &[&Transaction]) {}
// Minimal in-memory `Writer`: serializations land in the wrapped `Vec<u8>`, which the test
// later re-reads to simulate persistence and restart.
152 pub struct VecWriter(pub Vec<u8>);
153 impl Writer for VecWriter {
154 fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
// Appending to a Vec cannot fail, so this (partially visible) impl simply extends.
155 self.0.extend_from_slice(buf);
160 /// The LDK API requires that any time we tell it we're done persisting a `ChannelMonitor[Update]`
161 /// we never pass it in as the "latest" `ChannelMonitor` on startup. However, we can pass
162 /// out-of-date monitors as long as we never told LDK we finished persisting them, which we do by
163 /// storing both old `ChannelMonitor`s and ones that are "being persisted" here.
165 /// Note that such "being persisted" `ChannelMonitor`s are stored in `ChannelManager` and will
166 /// simply be replayed on startup.
167 struct LatestMonitorState {
168 /// The latest monitor id which we told LDK we've persisted
169 persisted_monitor_id: u64,
170 /// The latest serialized `ChannelMonitor` that we told LDK we persisted.
171 persisted_monitor: Vec<u8>,
172 /// A set of (monitor id, serialized `ChannelMonitor`)s which we're currently "persisting",
173 /// from LDK's perspective.
174 pending_monitors: Vec<(u64, Vec<u8>)>,
// Wrapper around LDK's `chainmonitor::ChainMonitor` which additionally tracks, per funding
// outpoint, the serialized monitor state we claim to have persisted (`LatestMonitorState`).
// This lets the fuzzer restart nodes from deliberately stale-but-legal monitor snapshots.
177 struct TestChainMonitor {
178 pub logger: Arc<dyn Logger>,
179 pub keys: Arc<KeyProvider>,
180 pub persister: Arc<TestPersister>,
181 pub chain_monitor: Arc<
182 chainmonitor::ChainMonitor<
184 Arc<dyn chain::Filter>,
185 Arc<TestBroadcaster>,
// Map from each channel's funding outpoint to its persisted/pending monitor snapshots.
191 pub latest_monitors: Mutex<HashMap<OutPoint, LatestMonitorState>>,
193 impl TestChainMonitor {
// Constructor: builds the inner `ChainMonitor` from the supplied broadcaster, logger, fee
// estimator, persister and key provider, and starts with an empty monitor-state map.
195 broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>,
196 persister: Arc<TestPersister>, keys: Arc<KeyProvider>,
199 chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(
204 Arc::clone(&persister),
209 latest_monitors: Mutex::new(new_hash_map()),
213 impl chain::Watch<TestChannelSigner> for TestChainMonitor {
// watch_channel: serialize the initial monitor, hand it to the real ChainMonitor, and record
// the result as our "persisted" snapshot. Initial persistence is required to complete
// synchronously here — the async (InProgress) path is deliberately not fuzzed at this stage.
215 &self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>,
216 ) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
217 let mut ser = VecWriter(Vec::new());
218 monitor.write(&mut ser).unwrap();
219 let monitor_id = monitor.get_latest_update_id();
220 let res = self.chain_monitor.watch_channel(funding_txo, monitor);
221 let state = match res {
222 Ok(chain::ChannelMonitorUpdateStatus::Completed) => LatestMonitorState {
223 persisted_monitor_id: monitor_id,
224 persisted_monitor: ser.0,
225 pending_monitors: Vec::new(),
227 Ok(chain::ChannelMonitorUpdateStatus::InProgress) => {
228 panic!("The test currently doesn't test initial-persistence via the async pipeline")
230 Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(),
// Each funding outpoint may only ever be watched once.
233 if self.latest_monitors.lock().unwrap().insert(funding_txo, state).is_some() {
234 panic!("Already had monitor pre-watch_channel");
// update_channel: replay the update on top of the most recent serialized snapshot
// (pending if any, else the persisted one), re-serialize, then forward the update to the
// inner ChainMonitor and file the new snapshot under persisted or pending depending on the
// returned status. Round-tripping through serialization here also exercises the
// ChannelMonitor read/write paths.
240 &self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate,
241 ) -> chain::ChannelMonitorUpdateStatus {
242 let mut map_lock = self.latest_monitors.lock().unwrap();
243 let map_entry = map_lock.get_mut(&funding_txo).expect("Didn't have monitor on update call");
// Base the update on the newest snapshot we have, whether or not LDK thinks it
// finished persisting.
244 let latest_monitor_data = map_entry
248 .map(|(_, data)| data)
249 .unwrap_or(&map_entry.persisted_monitor);
250 let deserialized_monitor =
251 <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
252 &mut Cursor::new(&latest_monitor_data),
253 (&*self.keys, &*self.keys),
260 &&TestBroadcaster {},
// A throwaway estimator fixed at the 253 sat/kW floor is fine for replaying updates.
261 &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) },
265 let mut ser = VecWriter(Vec::new());
266 deserialized_monitor.write(&mut ser).unwrap();
267 let res = self.chain_monitor.update_channel(funding_txo, update);
269 chain::ChannelMonitorUpdateStatus::Completed => {
// Persistence completed synchronously: this snapshot is now the latest persisted.
270 map_entry.persisted_monitor_id = update.update_id;
271 map_entry.persisted_monitor = ser.0;
273 chain::ChannelMonitorUpdateStatus::InProgress => {
// Persistence is "in flight": queue the snapshot until the fuzzer completes it.
274 map_entry.pending_monitors.push((update.update_id, ser.0));
276 chain::ChannelMonitorUpdateStatus::UnrecoverableError => panic!(),
// Pass-through to the inner ChainMonitor.
281 fn release_pending_monitor_events(
283 ) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
284 return self.chain_monitor.release_pending_monitor_events();
// Fields of the `KeyProvider` struct (struct header is outside this excerpt): a fixed node
// secret, a counter used to derive unique-but-deterministic "random" bytes, and a cache of
// per-channel `EnforcementState`s keyed by commitment seed.
289 node_secret: SecretKey,
290 rand_bytes_id: atomic::AtomicU32,
291 enforcement_states: Mutex<HashMap<[u8; 32], Arc<Mutex<EnforcementState>>>>,
294 impl EntropySource for KeyProvider {
// Deterministic "entropy": a counter (little-endian, in bytes 26..30) plus a constant tag
// byte and the node secret's last byte, so each node produces a unique, reproducible stream.
295 fn get_secure_random_bytes(&self) -> [u8; 32] {
296 let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
298 let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_secret[31]];
299 res[30 - 4..30].copy_from_slice(&id.to_le_bytes());
304 impl NodeSigner for KeyProvider {
// Node identity is derived straight from the fixed `node_secret`; phantom-node operations
// are unsupported throughout this impl.
305 fn get_node_id(&self, recipient: Recipient) -> Result<PublicKey, ()> {
306 let node_secret = match recipient {
307 Recipient::Node => Ok(&self.node_secret),
308 Recipient::PhantomNode => Err(()),
310 Ok(PublicKey::from_secret_key(&Secp256k1::signing_only(), node_secret))
// ECDH with our node secret, applying the optional tweak first (as onion processing
// requires) before combining with the counterparty's public key.
314 &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>,
315 ) -> Result<SharedSecret, ()> {
316 let mut node_secret = match recipient {
317 Recipient::Node => Ok(self.node_secret.clone()),
318 Recipient::PhantomNode => Err(()),
320 if let Some(tweak) = tweak {
321 node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
323 Ok(SharedSecret::new(other_key, &node_secret))
// Deterministic inbound-payment key: constant bytes, tag `1`, plus the node secret's
// last byte to keep the three fuzzed nodes distinct.
326 fn get_inbound_payment_key_material(&self) -> KeyMaterial {
328 let random_bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_secret[31]];
329 KeyMaterial(random_bytes)
// BOLT11/BOLT12 signing bodies are not visible in this excerpt; presumably unreachable
// stubs for this fuzz target — TODO confirm against the full source.
333 &self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient,
334 ) -> Result<RecoverableSignature, ()> {
338 fn sign_bolt12_invoice_request(
339 &self, _invoice_request: &UnsignedInvoiceRequest,
340 ) -> Result<schnorr::Signature, ()> {
344 fn sign_bolt12_invoice(
345 &self, _invoice: &UnsignedBolt12Invoice,
346 ) -> Result<schnorr::Signature, ()> {
// Gossip messages are signed for real: ECDSA over the double-SHA256 of the encoding.
350 fn sign_gossip_message(
351 &self, msg: lightning::ln::msgs::UnsignedGossipMessage,
352 ) -> Result<Signature, ()> {
353 let msg_hash = Message::from_digest(Sha256dHash::hash(&msg.encode()[..]).to_byte_array());
354 let secp_ctx = Secp256k1::signing_only();
355 Ok(secp_ctx.sign_ecdsa(&msg_hash, &self.node_secret))
359 impl SignerProvider for KeyProvider {
360 type EcdsaSigner = TestChannelSigner;
362 type TaprootSigner = TestChannelSigner;
// Channel-keys ids embed a fresh counter byte (truncated to u8) so each channel's derived
// keys are distinct but reproducible across restarts.
364 fn generate_channel_keys_id(
365 &self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128,
367 let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8;
// Derive an `InMemorySigner` from fixed byte patterns: each secret is a constant array
// whose tag byte (4..=8) names the key's role, with the channel's `id` byte and the node
// secret's last byte mixed in. Wrapped in `TestChannelSigner` so signing invariants
// (e.g. no signing for revoked states) are enforced during the fuzz run.
371 fn derive_channel_signer(
372 &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32],
373 ) -> Self::EcdsaSigner {
374 let secp_ctx = Secp256k1::signing_only();
375 let id = channel_keys_id[0];
377 let keys = InMemorySigner::new(
379 SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_secret[31]]).unwrap(),
380 SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_secret[31]]).unwrap(),
381 SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_secret[31]]).unwrap(),
382 SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_secret[31]]).unwrap(),
383 SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_secret[31]]).unwrap(),
384 [id, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_secret[31]],
385 channel_value_satoshis,
// Share one EnforcementState per commitment seed so a re-derived signer keeps the
// same revocation-tracking state.
389 let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
390 TestChannelSigner::new_with_revoked(keys, revoked_commitment, false)
// On deserialization, rebuild the enforcement wrapper around the read-back inner signer,
// again keyed by commitment seed so state survives node restarts.
393 fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> {
394 let mut reader = std::io::Cursor::new(buffer);
396 let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
397 let state = self.make_enforcement_state_cell(inner.commitment_seed);
399 Ok(TestChannelSigner::new_with_revoked(inner, state, false))
// Destination script: P2WPKH over a deterministic claim key (tag byte 2).
402 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
403 let secp_ctx = Secp256k1::signing_only();
405 let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_secret[31]]).unwrap();
406 let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(
407 &PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize(),
410 .push_opcode(opcodes::all::OP_PUSHBYTES_0)
411 .push_slice(our_channel_monitor_claim_key_hash)
// Shutdown script: P2WPKH over another deterministic key (tag byte 3).
415 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
416 let secp_ctx = Secp256k1::signing_only();
418 let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_secret[31]]).unwrap();
420 WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
421 Ok(ShutdownScript::new_p2wpkh(&pubkey_hash))
// Get-or-create the shared `EnforcementState` for a commitment seed. Keeping one cell per
// seed means every signer derived or deserialized for the same channel observes the same
// revocation state, even across simulated restarts.
426 fn make_enforcement_state_cell(
427 &self, commitment_seed: [u8; 32],
428 ) -> Arc<Mutex<EnforcementState>> {
429 let mut revoked_commitments = self.enforcement_states.lock().unwrap();
430 if !revoked_commitments.contains_key(&commitment_seed) {
432 .insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new())));
434 let cell = revoked_commitments.get(&commitment_seed).unwrap();
// Validate that a payment-send `APIError` is one the fuzzer expects. Anything outside the
// allow-list panics, which surfaces the unexpected state to the fuzz harness.
// `sendable_bounds_violated` is true when the attempted amount was outside the channel's
// advertised next-HTLC min/max; a `ChannelUnavailable` error is only acceptable then.
440 fn check_api_err(api_err: APIError, sendable_bounds_violated: bool) {
442 APIError::APIMisuseError { .. } => panic!("We can't misuse the API"),
443 APIError::FeeRateTooHigh { .. } => panic!("We can't send too much fee?"),
444 APIError::InvalidRoute { .. } => panic!("Our routes should work"),
445 APIError::ChannelUnavailable { err } => {
446 // Test the error against a list of errors we can hit, and reject
447 // all others. If you hit this panic, the list of acceptable errors
448 // is probably just stale and you should add new messages here.
450 "Peer for first hop currently disconnected" => {},
451 _ if err.starts_with("Cannot send less than our next-HTLC minimum - ") => {},
452 _ if err.starts_with("Cannot send more than our next-HTLC maximum - ") => {},
453 _ => panic!("{}", err),
// Min/max errors must only occur when we actually tried to violate the bounds.
455 assert!(sendable_bounds_violated);
457 APIError::MonitorUpdateInProgress => {
458 // We can (obviously) temp-fail a monitor update
460 APIError::IncompatibleShutdownScript { .. } => {
461 panic!("Cannot send an incompatible shutdown script")
// Unwrap every `PaymentSendFailure` variant down to its constituent `APIError`s and vet each
// one via `check_api_err`. Duplicate payments are impossible by construction here (payment
// ids come from a monotonic counter), so that variant panics.
466 fn check_payment_err(send_err: PaymentSendFailure, sendable_bounds_violated: bool) {
468 PaymentSendFailure::ParameterError(api_err) => {
469 check_api_err(api_err, sendable_bounds_violated)
471 PaymentSendFailure::PathParameterError(per_path_results) => {
472 for res in per_path_results {
473 if let Err(api_err) = res {
474 check_api_err(api_err, sendable_bounds_violated);
478 PaymentSendFailure::AllFailedResendSafe(per_path_results) => {
479 for api_err in per_path_results {
480 check_api_err(api_err, sendable_bounds_violated);
483 PaymentSendFailure::PartialFailure { results, .. } => {
485 if let Err(api_err) = res {
486 check_api_err(api_err, sendable_bounds_violated);
490 PaymentSendFailure::DuplicatePayment => panic!(),
// Alias for the fully-parameterized `ChannelManager` used by every node in this fuzz target,
// wired to the test chain monitor and broadcaster above (remaining type parameters are not
// visible in this excerpt).
494 type ChanMan<'a> = ChannelManager<
495 Arc<TestChainMonitor>,
496 Arc<TestBroadcaster>,
// Derive a (secret, hash) pair for an inbound payment on `dest`. The hash is the SHA-256 of
// the single-byte `payment_id`; if registering that hash fails (e.g. it was already used),
// the id is bumped (wrapping) and retried. The loop/return structure is partially elided in
// this excerpt.
506 fn get_payment_secret_hash(
507 dest: &ChanMan, payment_id: &mut u8,
508 ) -> Option<(PaymentSecret, PaymentHash)> {
509 let mut payment_hash;
511 payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).to_byte_array());
512 if let Ok(payment_secret) =
513 dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None)
515 return Some((payment_secret, payment_hash));
517 *payment_id = payment_id.wrapping_add(1);
// Thin wrapper (its `fn` header is outside this excerpt) that invokes `send_payment` and
// discards the returned success flag.
524 source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8,
525 payment_idx: &mut u64,
527 send_payment(source, dest, dest_chan_id, amt, payment_id, payment_idx);
// Send a single-hop payment of `amt` msat from `source` directly to `dest` over the channel
// with SCID `dest_chan_id`, constructing the one-hop `Route` by hand (the fuzz router never
// finds routes). On error, verifies the failure is acceptable given whether `amt` was within
// the channel's advertised next-HTLC bounds. (The `fn` header is outside this excerpt.)
532 source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8,
533 payment_idx: &mut u64,
535 let (payment_secret, payment_hash) =
536 if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) {
// Unique PaymentId from the monotonic `payment_idx` counter (first 8 bytes).
541 let mut payment_id = [0; 32];
542 payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
// Record the channel's currently-sendable HTLC bounds so we can judge any error below.
544 let (min_value_sendable, max_value_sendable) = source
545 .list_usable_channels()
547 .find(|chan| chan.short_channel_id == Some(dest_chan_id))
548 .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
550 if let Err(err) = source.send_payment_with_route(
553 hops: vec![RouteHop {
554 pubkey: dest.get_our_node_id(),
555 node_features: dest.node_features(),
556 short_channel_id: dest_chan_id,
557 channel_features: dest.channel_features(),
559 cltv_expiry_delta: 200,
560 maybe_announced_channel: true,
567 RecipientOnionFields::secret_only(payment_secret),
568 PaymentId(payment_id),
// Errors are only acceptable when the amount was actually out of bounds.
570 check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
573 // Note that while the max is a strict upper-bound, we can occasionally send substantially
574 // below the minimum, with some gap which is unusable immediately below the minimum. Thus,
575 // we don't check against min_value_sendable here.
576 assert!(amt <= max_value_sendable);
// Parameter list of a two-hop-send wrapper (its `fn` header and body are outside this
// excerpt); presumably it forwards to `send_hop_payment` below, discarding the result —
// TODO confirm against the full source.
583 source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64,
584 amt: u64, payment_id: &mut u8, payment_idx: &mut u64,
// Send a two-hop payment `source -> middle -> dest`, hand-building the route. The first hop
// carries a fixed 50_000 msat fee, so the amount leaving `source` is `amt + first_hop_fee`;
// error acceptability is judged against the *first* channel's sendable bounds using that
// total. (The `fn` header is outside this excerpt.)
600 source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64,
601 amt: u64, payment_id: &mut u8, payment_idx: &mut u64,
603 let (payment_secret, payment_hash) =
604 if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) {
// Unique PaymentId from the monotonic `payment_idx` counter (first 8 bytes).
609 let mut payment_id = [0; 32];
610 payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
// Bounds of the first-hop channel (source -> middle), which is what limits the send.
612 let (min_value_sendable, max_value_sendable) = source
613 .list_usable_channels()
615 .find(|chan| chan.short_channel_id == Some(middle_chan_id))
616 .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
618 let first_hop_fee = 50_000;
619 if let Err(err) = source.send_payment_with_route(
624 pubkey: middle.get_our_node_id(),
625 node_features: middle.node_features(),
626 short_channel_id: middle_chan_id,
627 channel_features: middle.channel_features(),
628 fee_msat: first_hop_fee,
629 cltv_expiry_delta: 100,
630 maybe_announced_channel: true,
633 pubkey: dest.get_our_node_id(),
634 node_features: dest.node_features(),
635 short_channel_id: dest_chan_id,
636 channel_features: dest.channel_features(),
638 cltv_expiry_delta: 200,
639 maybe_announced_channel: true,
647 RecipientOnionFields::secret_only(payment_secret),
648 PaymentId(payment_id),
// Judge the failure against the total debited from the first hop (amount + fee).
650 let sent_amt = amt + first_hop_fee;
651 check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
654 // Note that while the max is a strict upper-bound, we can occasionally send substantially
655 // below the minimum, with some gap which is unusable immediately below the minimum. Thus,
656 // we don't check against min_value_sendable here.
657 assert!(amt + first_hop_fee <= max_value_sendable);
663 pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
664 let out = SearchingOutput::new(underlying_out);
665 let broadcast = Arc::new(TestBroadcaster {});
666 let router = FuzzRouter {};
668 macro_rules! make_node {
669 ($node_id: expr, $fee_estimator: expr) => {{
670 let logger: Arc<dyn Logger> =
671 Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
672 let node_secret = SecretKey::from_slice(&[
673 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
677 let keys_manager = Arc::new(KeyProvider {
679 rand_bytes_id: atomic::AtomicU32::new(0),
680 enforcement_states: Mutex::new(new_hash_map()),
682 let monitor = Arc::new(TestChainMonitor::new(
685 $fee_estimator.clone(),
686 Arc::new(TestPersister {
687 update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed),
689 Arc::clone(&keys_manager),
692 let mut config = UserConfig::default();
693 config.channel_config.forwarding_fee_proportional_millionths = 0;
694 config.channel_handshake_config.announced_channel = true;
696 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
697 config.manually_accept_inbound_channels = true;
699 let network = Network::Bitcoin;
700 let best_block_timestamp = genesis_block(network).header.time;
701 let params = ChainParameters { network, best_block: BestBlock::from_network(network) };
704 $fee_estimator.clone(),
709 keys_manager.clone(),
710 keys_manager.clone(),
711 keys_manager.clone(),
714 best_block_timestamp,
722 macro_rules! reload_node {
723 ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => {{
724 let keys_manager = Arc::clone(&$keys_manager);
725 let logger: Arc<dyn Logger> =
726 Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
727 let chain_monitor = Arc::new(TestChainMonitor::new(
730 $fee_estimator.clone(),
731 Arc::new(TestPersister {
732 update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed),
734 Arc::clone(&$keys_manager),
737 let mut config = UserConfig::default();
738 config.channel_config.forwarding_fee_proportional_millionths = 0;
739 config.channel_handshake_config.announced_channel = true;
741 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
742 config.manually_accept_inbound_channels = true;
745 let mut monitors = new_hash_map();
746 let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
747 for (outpoint, mut prev_state) in old_monitors.drain() {
750 <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
751 &mut Cursor::new(&prev_state.persisted_monitor),
752 (&*$keys_manager, &*$keys_manager),
754 .expect("Failed to read monitor")
757 // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting,
758 // considering them discarded. LDK should replay these for us as they're stored in
759 // the `ChannelManager`.
760 prev_state.pending_monitors.clear();
761 chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, prev_state);
763 let mut monitor_refs = new_hash_map();
764 for (outpoint, monitor) in monitors.iter_mut() {
765 monitor_refs.insert(*outpoint, monitor);
768 let read_args = ChannelManagerReadArgs {
769 entropy_source: keys_manager.clone(),
770 node_signer: keys_manager.clone(),
771 signer_provider: keys_manager.clone(),
772 fee_estimator: $fee_estimator.clone(),
773 chain_monitor: chain_monitor.clone(),
774 tx_broadcaster: broadcast.clone(),
777 default_config: config,
778 channel_monitors: monitor_refs,
782 <(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args)
783 .expect("Failed to read manager")
785 chain_monitor.clone(),
787 for (funding_txo, mon) in monitors.drain() {
789 chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
790 Ok(ChannelMonitorUpdateStatus::Completed)
797 let mut channel_txn = Vec::new();
798 macro_rules! make_channel {
799 ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => {{
800 let init_dest = Init {
801 features: $dest.init_features(),
803 remote_network_address: None,
805 $source.peer_connected(&$dest.get_our_node_id(), &init_dest, true).unwrap();
806 let init_src = Init {
807 features: $source.init_features(),
809 remote_network_address: None,
811 $dest.peer_connected(&$source.get_our_node_id(), &init_src, false).unwrap();
813 $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap();
815 let events = $source.get_and_clear_pending_msg_events();
816 assert_eq!(events.len(), 1);
817 if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
820 panic!("Wrong event type");
824 $dest.handle_open_channel(&$source.get_our_node_id(), &open_channel);
825 let accept_channel = {
827 let events = $dest.get_and_clear_pending_events();
828 assert_eq!(events.len(), 1);
829 if let events::Event::OpenChannelRequest {
830 ref temporary_channel_id,
831 ref counterparty_node_id,
835 let mut random_bytes = [0u8; 16];
837 .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]);
838 let user_channel_id = u128::from_be_bytes(random_bytes);
840 .accept_inbound_channel(
841 temporary_channel_id,
842 counterparty_node_id,
847 panic!("Wrong event type");
850 let events = $dest.get_and_clear_pending_msg_events();
851 assert_eq!(events.len(), 1);
852 if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
855 panic!("Wrong event type");
859 $source.handle_accept_channel(&$dest.get_our_node_id(), &accept_channel);
862 let events = $source.get_and_clear_pending_events();
863 assert_eq!(events.len(), 1);
864 if let events::Event::FundingGenerationReady {
865 ref temporary_channel_id,
866 ref channel_value_satoshis,
871 let tx = Transaction {
872 version: Version($chan_id),
873 lock_time: LockTime::ZERO,
876 value: Amount::from_sat(*channel_value_satoshis),
877 script_pubkey: output_script.clone(),
880 funding_output = OutPoint { txid: tx.txid(), index: 0 };
882 .funding_transaction_generated(
883 &temporary_channel_id,
884 &$dest.get_our_node_id(),
888 channel_txn.push(tx);
890 panic!("Wrong event type");
894 let funding_created = {
895 let events = $source.get_and_clear_pending_msg_events();
896 assert_eq!(events.len(), 1);
897 if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
900 panic!("Wrong event type");
903 $dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);
905 let funding_signed = {
906 let events = $dest.get_and_clear_pending_msg_events();
907 assert_eq!(events.len(), 1);
908 if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
911 panic!("Wrong event type");
914 let events = $dest.get_and_clear_pending_events();
915 assert_eq!(events.len(), 1);
916 if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] {
917 assert_eq!(counterparty_node_id, &$source.get_our_node_id());
919 panic!("Wrong event type");
922 $source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);
923 let events = $source.get_and_clear_pending_events();
924 assert_eq!(events.len(), 1);
925 if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] {
926 assert_eq!(counterparty_node_id, &$dest.get_our_node_id());
928 panic!("Wrong event type");
935 macro_rules! confirm_txn {
937 let chain_hash = genesis_block(Network::Bitcoin).block_hash();
938 let mut header = create_dummy_header(chain_hash, 42);
940 channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
941 $node.transactions_confirmed(&header, &txdata, 1);
943 header = create_dummy_header(header.block_hash(), 42);
945 $node.best_block_updated(&header, 99);
949 macro_rules! lock_fundings {
951 let mut node_events = Vec::new();
952 for node in $nodes.iter() {
953 node_events.push(node.get_and_clear_pending_msg_events());
955 for (idx, node_event) in node_events.iter().enumerate() {
956 for event in node_event {
957 if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } =
960 for node in $nodes.iter() {
961 if node.get_our_node_id() == *node_id {
962 node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg);
966 panic!("Wrong event type");
971 for node in $nodes.iter() {
972 let events = node.get_and_clear_pending_msg_events();
973 for event in events {
974 if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
976 panic!("Wrong event type");
983 let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
984 let mut last_htlc_clear_fee_a = 253;
985 let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
986 let mut last_htlc_clear_fee_b = 253;
987 let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
988 let mut last_htlc_clear_fee_c = 253;
990 // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
992 let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a);
993 let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b);
994 let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c);
996 let mut nodes = [node_a, node_b, node_c];
998 let chan_1_funding = make_channel!(nodes[0], nodes[1], keys_manager_b, 0);
999 let chan_2_funding = make_channel!(nodes[1], nodes[2], keys_manager_c, 1);
1001 for node in nodes.iter() {
1005 lock_fundings!(nodes);
1007 let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
1008 let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();
1010 let mut p_id: u8 = 0;
1011 let mut p_idx: u64 = 0;
1013 let mut chan_a_disconnected = false;
1014 let mut chan_b_disconnected = false;
1015 let mut ab_events = Vec::new();
1016 let mut ba_events = Vec::new();
1017 let mut bc_events = Vec::new();
1018 let mut cb_events = Vec::new();
1020 let mut node_a_ser = VecWriter(Vec::new());
1021 nodes[0].write(&mut node_a_ser).unwrap();
1022 let mut node_b_ser = VecWriter(Vec::new());
1023 nodes[1].write(&mut node_b_ser).unwrap();
1024 let mut node_c_ser = VecWriter(Vec::new());
1025 nodes[2].write(&mut node_c_ser).unwrap();
1027 macro_rules! test_return {
1029 assert_eq!(nodes[0].list_channels().len(), 1);
1030 assert_eq!(nodes[1].list_channels().len(), 2);
1031 assert_eq!(nodes[2].list_channels().len(), 1);
1036 let mut read_pos = 0;
1037 macro_rules! get_slice {
1039 let slice_len = $len as usize;
1040 if data.len() < read_pos + slice_len {
1043 read_pos += slice_len;
1044 &data[read_pos - slice_len..read_pos]
1049 // Push any events from Node B onto ba_events and bc_events
1050 macro_rules! push_excess_b_events {
1051 ($excess_events: expr, $expect_drop_node: expr) => { {
1052 let a_id = nodes[0].get_our_node_id();
1053 let expect_drop_node: Option<usize> = $expect_drop_node;
1054 let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None };
1055 for event in $excess_events {
1056 let push_a = match event {
1057 events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
1058 if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
1061 events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
1062 if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
1065 events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
1066 if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
1069 events::MessageSendEvent::SendChannelReady { .. } => continue,
1070 events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
1071 events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
1072 assert_eq!(msg.contents.channel_flags & 2, 0); // The disable bit must never be set!
1073 if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
1076 _ => panic!("Unhandled message event {:?}", event),
1078 if push_a { ba_events.push(event); } else { bc_events.push(event); }
1083 // While delivering messages, we select across three possible message selection processes
1084 // to ensure we get as much coverage as possible. See the individual enum variants for more
1086 #[derive(PartialEq)]
1087 enum ProcessMessages {
1088 /// Deliver all available messages, including fetching any new messages from
1089 /// `get_and_clear_pending_msg_events()` (which may have side effects).
1091 /// Call `get_and_clear_pending_msg_events()` first, and then deliver up to one
1092 /// message (which may already be queued).
1094 /// Deliver up to one already-queued message. This avoids any potential side-effects
1095 /// of `get_and_clear_pending_msg_events()` (eg freeing the HTLC holding cell), which
1096 /// provides potentially more coverage.
1100 macro_rules! process_msg_events {
1101 ($node: expr, $corrupt_forward: expr, $limit_events: expr) => { {
1102 let mut events = if $node == 1 {
1103 let mut new_events = Vec::new();
1104 mem::swap(&mut new_events, &mut ba_events);
1105 new_events.extend_from_slice(&bc_events[..]);
1108 } else if $node == 0 {
1109 let mut new_events = Vec::new();
1110 mem::swap(&mut new_events, &mut ab_events);
1113 let mut new_events = Vec::new();
1114 mem::swap(&mut new_events, &mut cb_events);
1117 let mut new_events = Vec::new();
1118 if $limit_events != ProcessMessages::OnePendingMessage {
1119 new_events = nodes[$node].get_and_clear_pending_msg_events();
1121 let mut had_events = false;
1122 let mut events_iter = events.drain(..).chain(new_events.drain(..));
1123 let mut extra_ev = None;
1124 for event in &mut events_iter {
1127 events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
1128 for (idx, dest) in nodes.iter().enumerate() {
1129 if dest.get_our_node_id() == node_id {
1130 for update_add in update_add_htlcs.iter() {
1131 out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
1132 if !$corrupt_forward {
1133 dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
1135 // Corrupt the update_add_htlc message so that its HMAC
1136 // check will fail and we generate a
1137 // update_fail_malformed_htlc instead of an
1138 // update_fail_htlc as we do when we reject a payment.
1139 let mut msg_ser = update_add.encode();
1140 msg_ser[1000] ^= 0xff;
1141 let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
1142 dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
1145 for update_fulfill in update_fulfill_htlcs.iter() {
1146 out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
1147 dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
1149 for update_fail in update_fail_htlcs.iter() {
1150 out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
1151 dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
1153 for update_fail_malformed in update_fail_malformed_htlcs.iter() {
1154 out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
1155 dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
1157 if let Some(msg) = update_fee {
1158 out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
1159 dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
1161 let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
1162 !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
1163 if $limit_events != ProcessMessages::AllMessages && processed_change {
1164 // If we only want to process some messages, don't deliver the CS until later.
1165 extra_ev = Some(events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate {
1166 update_add_htlcs: Vec::new(),
1167 update_fail_htlcs: Vec::new(),
1168 update_fulfill_htlcs: Vec::new(),
1169 update_fail_malformed_htlcs: Vec::new(),
1175 out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
1176 dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
1181 events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1182 for (idx, dest) in nodes.iter().enumerate() {
1183 if dest.get_our_node_id() == *node_id {
1184 out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
1185 dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
1189 events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
1190 for (idx, dest) in nodes.iter().enumerate() {
1191 if dest.get_our_node_id() == *node_id {
1192 out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
1193 dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
1197 events::MessageSendEvent::SendChannelReady { .. } => {
1198 // Can be generated as a reestablish response
1200 events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
1201 // Can be generated as a reestablish response
1203 events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
1204 // When we reconnect we will resend a channel_update to make sure our
1205 // counterparty has the latest parameters for receiving payments
1206 // through us. We do, however, check that the message does not include
1207 // the "disabled" bit, as we should never ever have a channel which is
1208 // disabled when we send such an update (or it may indicate channel
1209 // force-close which we should detect as an error).
1210 assert_eq!(msg.contents.channel_flags & 2, 0);
1212 _ => if out.may_fail.load(atomic::Ordering::Acquire) {
1215 panic!("Unhandled message event {:?}", event)
1218 if $limit_events != ProcessMessages::AllMessages {
1223 push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None);
1224 } else if $node == 0 {
1225 if let Some(ev) = extra_ev { ab_events.push(ev); }
1226 for event in events_iter { ab_events.push(event); }
1228 if let Some(ev) = extra_ev { cb_events.push(ev); }
1229 for event in events_iter { cb_events.push(event); }
1235 macro_rules! process_msg_noret {
1236 ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{
1237 process_msg_events!($node, $corrupt_forward, $limit_events);
1241 macro_rules! drain_msg_events_on_disconnect {
1242 ($counterparty_id: expr) => {{
1243 if $counterparty_id == 0 {
1244 for event in nodes[0].get_and_clear_pending_msg_events() {
1246 events::MessageSendEvent::UpdateHTLCs { .. } => {},
1247 events::MessageSendEvent::SendRevokeAndACK { .. } => {},
1248 events::MessageSendEvent::SendChannelReestablish { .. } => {},
1249 events::MessageSendEvent::SendChannelReady { .. } => {},
1250 events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
1251 events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
1252 assert_eq!(msg.contents.channel_flags & 2, 0); // The disable bit must never be set!
1255 if out.may_fail.load(atomic::Ordering::Acquire) {
1258 panic!("Unhandled message event")
1263 push_excess_b_events!(
1264 nodes[1].get_and_clear_pending_msg_events().drain(..),
1270 for event in nodes[2].get_and_clear_pending_msg_events() {
1272 events::MessageSendEvent::UpdateHTLCs { .. } => {},
1273 events::MessageSendEvent::SendRevokeAndACK { .. } => {},
1274 events::MessageSendEvent::SendChannelReestablish { .. } => {},
1275 events::MessageSendEvent::SendChannelReady { .. } => {},
1276 events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
1277 events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
1278 assert_eq!(msg.contents.channel_flags & 2, 0); // The disable bit must never be set!
1281 if out.may_fail.load(atomic::Ordering::Acquire) {
1284 panic!("Unhandled message event")
1289 push_excess_b_events!(
1290 nodes[1].get_and_clear_pending_msg_events().drain(..),
1299 macro_rules! process_events {
1300 ($node: expr, $fail: expr) => {{
1301 // In case we get 256 payments we may have a hash collision, resulting in the
1302 // second claim/fail call not finding the duplicate-hash HTLC, so we have to
1303 // deduplicate the calls here.
1304 let mut claim_set = new_hash_map();
1305 let mut events = nodes[$node].get_and_clear_pending_events();
1306 // Sort events so that PendingHTLCsForwardable get processed last. This avoids a
1307 // case where we first process a PendingHTLCsForwardable, then claim/fail on a
1308 // PaymentClaimable, claiming/failing two HTLCs, but leaving a just-generated
1309 // PaymentClaimable event for the second HTLC in our pending_events (and breaking
1310 // our claim_set deduplication).
1311 events.sort_by(|a, b| {
1312 if let events::Event::PaymentClaimable { .. } = a {
1313 if let events::Event::PendingHTLCsForwardable { .. } = b {
1318 } else if let events::Event::PendingHTLCsForwardable { .. } = a {
1319 if let events::Event::PaymentClaimable { .. } = b {
1328 let had_events = !events.is_empty();
1329 for event in events.drain(..) {
1331 events::Event::PaymentClaimable { payment_hash, .. } => {
1332 if claim_set.insert(payment_hash.0, ()).is_none() {
1334 nodes[$node].fail_htlc_backwards(&payment_hash);
1336 nodes[$node].claim_funds(PaymentPreimage(payment_hash.0));
1340 events::Event::PaymentSent { .. } => {},
1341 events::Event::PaymentClaimed { .. } => {},
1342 events::Event::PaymentPathSuccessful { .. } => {},
1343 events::Event::PaymentPathFailed { .. } => {},
1344 events::Event::PaymentFailed { .. } => {},
1345 events::Event::ProbeSuccessful { .. }
1346 | events::Event::ProbeFailed { .. } => {
1347 // Even though we don't explicitly send probes, because probes are
1348 // detected based on hashing the payment hash+preimage, its rather
1349 // trivial for the fuzzer to build payments that accidentally end up
1350 // looking like probes.
1352 events::Event::PaymentForwarded { .. } if $node == 1 => {},
1353 events::Event::ChannelReady { .. } => {},
1354 events::Event::PendingHTLCsForwardable { .. } => {
1355 nodes[$node].process_pending_htlc_forwards();
1357 events::Event::HTLCHandlingFailed { .. } => {},
1359 if out.may_fail.load(atomic::Ordering::Acquire) {
1362 panic!("Unhandled event")
1371 macro_rules! process_ev_noret {
1372 ($node: expr, $fail: expr) => {{
1373 process_events!($node, $fail);
// Completion-order selectors handed to `complete_monitor_update`: pop the oldest
// queued monitor update, or the second-oldest (leaving the first still pending).
// Returning `None` means there was nothing (or not enough) queued to complete.
1377 let complete_first = |v: &mut Vec<_>| if !v.is_empty() { Some(v.remove(0)) } else { None };
1378 let complete_second = |v: &mut Vec<_>| if v.len() > 1 { Some(v.remove(1)) } else { None };
1379 let complete_monitor_update =
1380 |monitor: &Arc<TestChainMonitor>,
1382 compl_selector: &dyn Fn(&mut Vec<(u64, Vec<u8>)>) -> Option<(u64, Vec<u8>)>| {
1383 if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) {
1385 state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0),
1386 "updates should be sorted by id"
1388 if let Some((id, data)) = compl_selector(&mut state.pending_monitors) {
1389 monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap();
1390 if id > state.persisted_monitor_id {
1391 state.persisted_monitor_id = id;
1392 state.persisted_monitor = data;
1398 let complete_all_monitor_updates = |monitor: &Arc<TestChainMonitor>, chan_funding| {
1399 if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) {
1401 state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0),
1402 "updates should be sorted by id"
1404 for (id, data) in state.pending_monitors.drain(..) {
1405 monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap();
1406 if id > state.persisted_monitor_id {
1407 state.persisted_monitor_id = id;
1408 state.persisted_monitor = data;
1414 let v = get_slice!(1)[0];
1415 out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes());
1417 // In general, we keep related message groups close together in binary form, allowing
1418 // bit-twiddling mutations to have similar effects. This is probably overkill, but no
1419 // harm in doing so.
1421 *monitor_a.persister.update_ret.lock().unwrap() =
1422 ChannelMonitorUpdateStatus::InProgress
1425 *monitor_b.persister.update_ret.lock().unwrap() =
1426 ChannelMonitorUpdateStatus::InProgress
1429 *monitor_c.persister.update_ret.lock().unwrap() =
1430 ChannelMonitorUpdateStatus::InProgress
1433 *monitor_a.persister.update_ret.lock().unwrap() =
1434 ChannelMonitorUpdateStatus::Completed
1437 *monitor_b.persister.update_ret.lock().unwrap() =
1438 ChannelMonitorUpdateStatus::Completed
1441 *monitor_c.persister.update_ret.lock().unwrap() =
1442 ChannelMonitorUpdateStatus::Completed
1445 0x08 => complete_all_monitor_updates(&monitor_a, &chan_1_funding),
1446 0x09 => complete_all_monitor_updates(&monitor_b, &chan_1_funding),
1447 0x0a => complete_all_monitor_updates(&monitor_b, &chan_2_funding),
1448 0x0b => complete_all_monitor_updates(&monitor_c, &chan_2_funding),
1451 if !chan_a_disconnected {
1452 nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
1453 nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
1454 chan_a_disconnected = true;
1455 drain_msg_events_on_disconnect!(0);
1459 if !chan_b_disconnected {
1460 nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
1461 nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
1462 chan_b_disconnected = true;
1463 drain_msg_events_on_disconnect!(2);
1467 if chan_a_disconnected {
1469 features: nodes[1].init_features(),
1471 remote_network_address: None,
1473 nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap();
1475 features: nodes[0].init_features(),
1477 remote_network_address: None,
1479 nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap();
1480 chan_a_disconnected = false;
1484 if chan_b_disconnected {
1486 features: nodes[2].init_features(),
1488 remote_network_address: None,
1490 nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
1492 features: nodes[1].init_features(),
1494 remote_network_address: None,
1496 nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
1497 chan_b_disconnected = false;
1501 0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages),
1502 0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages),
1503 0x12 => process_msg_noret!(0, true, ProcessMessages::OneMessage),
1504 0x13 => process_msg_noret!(0, false, ProcessMessages::OneMessage),
1505 0x14 => process_msg_noret!(0, true, ProcessMessages::OnePendingMessage),
1506 0x15 => process_msg_noret!(0, false, ProcessMessages::OnePendingMessage),
1508 0x16 => process_ev_noret!(0, true),
1509 0x17 => process_ev_noret!(0, false),
1511 0x18 => process_msg_noret!(1, true, ProcessMessages::AllMessages),
1512 0x19 => process_msg_noret!(1, false, ProcessMessages::AllMessages),
1513 0x1a => process_msg_noret!(1, true, ProcessMessages::OneMessage),
1514 0x1b => process_msg_noret!(1, false, ProcessMessages::OneMessage),
1515 0x1c => process_msg_noret!(1, true, ProcessMessages::OnePendingMessage),
1516 0x1d => process_msg_noret!(1, false, ProcessMessages::OnePendingMessage),
1518 0x1e => process_ev_noret!(1, true),
1519 0x1f => process_ev_noret!(1, false),
1521 0x20 => process_msg_noret!(2, true, ProcessMessages::AllMessages),
1522 0x21 => process_msg_noret!(2, false, ProcessMessages::AllMessages),
1523 0x22 => process_msg_noret!(2, true, ProcessMessages::OneMessage),
1524 0x23 => process_msg_noret!(2, false, ProcessMessages::OneMessage),
1525 0x24 => process_msg_noret!(2, true, ProcessMessages::OnePendingMessage),
1526 0x25 => process_msg_noret!(2, false, ProcessMessages::OnePendingMessage),
1528 0x26 => process_ev_noret!(2, true),
1529 0x27 => process_ev_noret!(2, false),
1532 if !chan_a_disconnected {
1533 nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
1534 chan_a_disconnected = true;
1535 push_excess_b_events!(
1536 nodes[1].get_and_clear_pending_msg_events().drain(..),
1542 let (new_node_a, new_monitor_a) =
1543 reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
1544 nodes[0] = new_node_a;
1545 monitor_a = new_monitor_a;
1548 if !chan_a_disconnected {
1549 nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
1550 chan_a_disconnected = true;
1551 nodes[0].get_and_clear_pending_msg_events();
1555 if !chan_b_disconnected {
1556 nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
1557 chan_b_disconnected = true;
1558 nodes[2].get_and_clear_pending_msg_events();
1562 let (new_node_b, new_monitor_b) =
1563 reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
1564 nodes[1] = new_node_b;
1565 monitor_b = new_monitor_b;
1568 if !chan_b_disconnected {
1569 nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
1570 chan_b_disconnected = true;
1571 push_excess_b_events!(
1572 nodes[1].get_and_clear_pending_msg_events().drain(..),
1578 let (new_node_c, new_monitor_c) =
1579 reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
1580 nodes[2] = new_node_c;
1581 monitor_c = new_monitor_c;
1584 // 1/10th the channel size:
1585 0x30 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx),
1586 0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx),
1587 0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx),
1588 0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx),
1589 0x34 => send_hop_noret(
1590 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx,
1592 0x35 => send_hop_noret(
1593 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx,
1596 0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_id, &mut p_idx),
1597 0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx),
1598 0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx),
1599 0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_id, &mut p_idx),
1600 0x3c => send_hop_noret(
1601 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx,
1603 0x3d => send_hop_noret(
1604 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx,
1607 0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_id, &mut p_idx),
1608 0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx),
1609 0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx),
1610 0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_id, &mut p_idx),
1611 0x44 => send_hop_noret(
1612 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx,
1614 0x45 => send_hop_noret(
1615 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx,
1618 0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000, &mut p_id, &mut p_idx),
1619 0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx),
1620 0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx),
1621 0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_id, &mut p_idx),
1622 0x4c => send_hop_noret(
1623 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx,
1625 0x4d => send_hop_noret(
1626 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx,
1629 0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_id, &mut p_idx),
1630 0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx),
1631 0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx),
1632 0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_id, &mut p_idx),
1633 0x54 => send_hop_noret(
1634 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx,
1636 0x55 => send_hop_noret(
1637 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx,
1640 0x58 => send_noret(&nodes[0], &nodes[1], chan_a, 100, &mut p_id, &mut p_idx),
1641 0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_id, &mut p_idx),
1642 0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_id, &mut p_idx),
1643 0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_id, &mut p_idx),
1644 0x5c => send_hop_noret(
1645 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_id, &mut p_idx,
1647 0x5d => send_hop_noret(
1648 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_id, &mut p_idx,
1651 0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_id, &mut p_idx),
1652 0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_id, &mut p_idx),
1653 0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, &mut p_id, &mut p_idx),
1654 0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_id, &mut p_idx),
1655 0x64 => send_hop_noret(
1656 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_id, &mut p_idx,
1658 0x65 => send_hop_noret(
1659 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_id, &mut p_idx,
1662 0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_id, &mut p_idx),
1663 0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_id, &mut p_idx),
1664 0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_id, &mut p_idx),
1665 0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_id, &mut p_idx),
1666 0x6c => send_hop_noret(
1667 &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_id, &mut p_idx,
1669 0x6d => send_hop_noret(
1670 &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_id, &mut p_idx,
1674 let mut max_feerate = last_htlc_clear_fee_a;
1676 max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
1678 if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
1679 fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release);
1681 nodes[0].maybe_update_chan_fees();
1684 fee_est_a.ret_val.store(253, atomic::Ordering::Release);
1685 nodes[0].maybe_update_chan_fees();
1689 let mut max_feerate = last_htlc_clear_fee_b;
1691 max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
1693 if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
1694 fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release);
1696 nodes[1].maybe_update_chan_fees();
1699 fee_est_b.ret_val.store(253, atomic::Ordering::Release);
1700 nodes[1].maybe_update_chan_fees();
1704 let mut max_feerate = last_htlc_clear_fee_c;
1706 max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
1708 if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
1709 fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release);
1711 nodes[2].maybe_update_chan_fees();
1714 fee_est_c.ret_val.store(253, atomic::Ordering::Release);
1715 nodes[2].maybe_update_chan_fees();
1718 0xf0 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_first),
1719 0xf1 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_second),
1720 0xf2 => complete_monitor_update(&monitor_a, &chan_1_funding, &Vec::pop),
1722 0xf4 => complete_monitor_update(&monitor_b, &chan_1_funding, &complete_first),
1723 0xf5 => complete_monitor_update(&monitor_b, &chan_1_funding, &complete_second),
1724 0xf6 => complete_monitor_update(&monitor_b, &chan_1_funding, &Vec::pop),
1726 0xf8 => complete_monitor_update(&monitor_b, &chan_2_funding, &complete_first),
1727 0xf9 => complete_monitor_update(&monitor_b, &chan_2_funding, &complete_second),
1728 0xfa => complete_monitor_update(&monitor_b, &chan_2_funding, &Vec::pop),
1730 0xfc => complete_monitor_update(&monitor_c, &chan_2_funding, &complete_first),
1731 0xfd => complete_monitor_update(&monitor_c, &chan_2_funding, &complete_second),
1732 0xfe => complete_monitor_update(&monitor_c, &chan_2_funding, &Vec::pop),
1735 // Test that no channel is in a stuck state where neither party can send funds even
1736 // after we resolve all pending events.
1737 // First make sure there are no pending monitor updates and further update
1738 // operations complete.
1739 *monitor_a.persister.update_ret.lock().unwrap() =
1740 ChannelMonitorUpdateStatus::Completed;
1741 *monitor_b.persister.update_ret.lock().unwrap() =
1742 ChannelMonitorUpdateStatus::Completed;
1743 *monitor_c.persister.update_ret.lock().unwrap() =
1744 ChannelMonitorUpdateStatus::Completed;
1746 complete_all_monitor_updates(&monitor_a, &chan_1_funding);
1747 complete_all_monitor_updates(&monitor_b, &chan_1_funding);
1748 complete_all_monitor_updates(&monitor_b, &chan_2_funding);
1749 complete_all_monitor_updates(&monitor_c, &chan_2_funding);
1751 // Next, make sure peers are all connected to each other
1752 if chan_a_disconnected {
1754 features: nodes[1].init_features(),
1756 remote_network_address: None,
1758 nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap();
1760 features: nodes[0].init_features(),
1762 remote_network_address: None,
1764 nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap();
1765 chan_a_disconnected = false;
1767 if chan_b_disconnected {
1769 features: nodes[2].init_features(),
1771 remote_network_address: None,
1773 nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
1775 features: nodes[1].init_features(),
1777 remote_network_address: None,
1779 nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
1780 chan_b_disconnected = false;
1783 for i in 0..std::usize::MAX {
1785 panic!("It may take may iterations to settle the state, but it should not take forever");
1787 // Then, make sure any current forwards make their way to their destination
1788 if process_msg_events!(0, false, ProcessMessages::AllMessages) {
1791 if process_msg_events!(1, false, ProcessMessages::AllMessages) {
1794 if process_msg_events!(2, false, ProcessMessages::AllMessages) {
1797 // ...making sure any pending PendingHTLCsForwardable events are handled and
1798 // payments claimed.
1799 if process_events!(0, false) {
1802 if process_events!(1, false) {
1805 if process_events!(2, false) {
1811 // Finally, make sure that at least one end of each channel can make a substantial payment
1813 send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx)
1815 &nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx
1819 send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx)
1821 &nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx
1825 last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
1826 last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
1827 last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire);
1829 _ => test_return!(),
1832 if nodes[0].get_and_clear_needs_persistence() == true {
1833 node_a_ser.0.clear();
1834 nodes[0].write(&mut node_a_ser).unwrap();
1836 if nodes[1].get_and_clear_needs_persistence() == true {
1837 node_b_ser.0.clear();
1838 nodes[1].write(&mut node_b_ser).unwrap();
1840 if nodes[2].get_and_clear_needs_persistence() == true {
1841 node_c_ser.0.clear();
1842 nodes[2].write(&mut node_c_ser).unwrap();
1847 /// We actually behave differently based on whether a certain log string has been seen, so we have
1848 /// to do a bit more tracking.
1850 struct SearchingOutput<O: Output> {
// Set to true once the known-benign "Outbound update_fee HTLC buffer overflow" log
// line has been observed (see `locked_write` below); while set, otherwise-fatal
// unhandled events are tolerated instead of panicking.
1852 may_fail: Arc<atomic::AtomicBool>,
1854 impl<O: Output> Output for SearchingOutput<O> {
1855 fn locked_write(&self, data: &[u8]) {
1856 // We hit a design limitation of LN state machine (see CONCURRENT_INBOUND_HTLC_FEE_BUFFER)
// When the log line indicating that known limitation appears, record it so later
// "unhandled" message/event paths are treated as acceptable rather than bugs.
1857 if std::str::from_utf8(data).unwrap().contains("Outbound update_fee HTLC buffer overflow - counterparty should force-close this channel") {
1858 self.may_fail.store(true, atomic::Ordering::Release);
// Always forward the log line to the wrapped output.
1860 self.output.locked_write(data)
1863 impl<O: Output> SearchingOutput<O> {
// Wraps `output`, starting with `may_fail` unset; it is only ever flipped to true
// by `locked_write` upon seeing the known-limitation log line.
1864 pub fn new(output: O) -> Self {
1865 Self { output, may_fail: Arc::new(atomic::AtomicBool::new(false)) }
// Public fuzz entry point: runs the same input through `do_test` twice, once with
// the final flag false and once true (the flag selects a `do_test` configuration
// variant — see `do_test`'s definition, not visible here, for its meaning).
1869 pub fn chanmon_consistency_test<Out: Output>(data: &[u8], out: Out) {
1870 do_test(data, out.clone(), false);
1871 do_test(data, out, true);
// C-callable fuzzer-harness entry point; logs are discarded via `DevNull`.
// NOTE(review): the `unsafe` blocks assume `data` points to `datalen` valid,
// initialized bytes that outlive both calls — upheld by the fuzzing harness.
1875 pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
1876 do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, false);
1877 do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, true);