use bitcoin::amount::Amount;
use bitcoin::blockdata::constants::genesis_block;
-use bitcoin::blockdata::transaction::{Transaction, TxOut};
-use bitcoin::blockdata::script::{Builder, ScriptBuf};
-use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::locktime::absolute::LockTime;
+use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::script::{Builder, ScriptBuf};
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::network::Network;
use bitcoin::transaction::Version;
-use bitcoin::WPubkeyHash;
-use bitcoin::hashes::Hash as TraitImport;
+use bitcoin::hash_types::BlockHash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
-use bitcoin::hash_types::BlockHash;
+use bitcoin::hashes::Hash as TraitImport;
+use bitcoin::WPubkeyHash;
-use lightning::blinded_path::BlindedPath;
use lightning::blinded_path::payment::ReceiveTlvs;
+use lightning::blinded_path::BlindedPath;
use lightning::chain;
-use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
-use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
+use lightning::chain::{
+ chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch,
+};
use lightning::events;
use lightning::events::MessageSendEventsProvider;
-use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::ln::channel_state::ChannelDetails;
-use lightning::ln::channelmanager::{ChainParameters,ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
-use lightning::ln::script::ShutdownScript;
+use lightning::ln::channel_state::ChannelDetails;
+use lightning::ln::channelmanager::{
+ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure,
+ RecipientOnionFields,
+};
use lightning::ln::functional_test_utils::*;
+use lightning::ln::msgs::{
+ self, ChannelMessageHandler, CommitmentUpdate, DecodeError, Init, UpdateAddHTLC,
+};
+use lightning::ln::script::ShutdownScript;
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
-use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
+use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RouteParameters, Router};
+use lightning::sign::{
+ EntropySource, InMemorySigner, KeyMaterial, NodeSigner, Recipient, SignerProvider,
+};
+use lightning::util::config::UserConfig;
use lightning::util::errors::APIError;
use lightning::util::hash_tables::*;
use lightning::util::logger::Logger;
-use lightning::util::config::UserConfig;
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RouteParameters, Router};
+use lightning::util::test_channel_signer::{EnforcementState, TestChannelSigner};
use crate::utils::test_logger::{self, Output};
use crate::utils::test_persister::TestPersister;
-use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1, self};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
use bitcoin::secp256k1::schnorr;
+use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey};
-use std::mem;
+use bech32::u5;
use std::cmp::{self, Ordering};
-use std::sync::{Arc,Mutex};
-use std::sync::atomic;
use std::io::Cursor;
-use bech32::u5;
+use std::mem;
+use std::sync::atomic;
+use std::sync::{Arc, Mutex};
const MAX_FEE: u32 = 10_000;
struct FuzzEstimator {
// Background feerate which is <= the minimum Normal feerate.
match conf_target {
ConfirmationTarget::OnChainSweep => MAX_FEE,
- ConfirmationTarget::ChannelCloseMinimum|ConfirmationTarget::AnchorChannelFee|ConfirmationTarget::MinAllowedAnchorChannelRemoteFee|ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee|ConfirmationTarget::OutputSpendingFee => 253,
- ConfirmationTarget::NonAnchorChannelFee => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
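+ // All of these targets pin to 253 sats/kW, LDK's feerate floor (just over 1 sat/vbyte).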
+ ConfirmationTarget::ChannelCloseMinimum
+ | ConfirmationTarget::AnchorChannelFee
+ | ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
+ | ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
+ | ConfirmationTarget::OutputSpendingFee => 253,
+ ConfirmationTarget::NonAnchorChannelFee => {
+ cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE)
+ },
}
}
}
impl Router for FuzzRouter {
fn find_route(
- &self, _payer: &PublicKey, _params: &RouteParameters, _first_hops: Option<&[&ChannelDetails]>,
- _inflight_htlcs: InFlightHtlcs
+ &self, _payer: &PublicKey, _params: &RouteParameters,
+ _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs,
) -> Result<Route, msgs::LightningError> {
Err(msgs::LightningError {
err: String::from("Not implemented"),
- action: msgs::ErrorAction::IgnoreError
+ action: msgs::ErrorAction::IgnoreError,
})
}
impl MessageRouter for FuzzRouter {
fn find_path(
- &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+ &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination,
) -> Result<OnionMessagePath, ()> {
unreachable!()
}
pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
- fn broadcast_transactions(&self, _txs: &[&Transaction]) { }
+ fn broadcast_transactions(&self, _txs: &[&Transaction]) {}
}
pub struct VecWriter(pub Vec<u8>);
pub logger: Arc<dyn Logger>,
pub keys: Arc<KeyProvider>,
pub persister: Arc<TestPersister>,
- pub chain_monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+ pub chain_monitor: Arc<
+ chainmonitor::ChainMonitor<
+ TestChannelSigner,
+ Arc<dyn chain::Filter>,
+ Arc<TestBroadcaster>,
+ Arc<FuzzEstimator>,
+ Arc<dyn Logger>,
+ Arc<TestPersister>,
+ >,
+ >,
pub latest_monitors: Mutex<HashMap<OutPoint, LatestMonitorState>>,
}
impl TestChainMonitor {
- pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
+ pub fn new(
+ broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>,
+ persister: Arc<TestPersister>, keys: Arc<KeyProvider>,
+ ) -> Self {
Self {
- chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))),
+ chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(
+ None,
+ broadcaster,
+ logger.clone(),
+ feeest,
+ Arc::clone(&persister),
+ )),
logger,
keys,
persister,
}
}
impl chain::Watch<TestChannelSigner> for TestChainMonitor {
- fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
+ fn watch_channel(
+ &self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>,
+ ) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
let mut ser = VecWriter(Vec::new());
monitor.write(&mut ser).unwrap();
let monitor_id = monitor.get_latest_update_id();
let res = self.chain_monitor.watch_channel(funding_txo, monitor);
let state = match res {
- Ok(chain::ChannelMonitorUpdateStatus::Completed) => {
- LatestMonitorState {
- persisted_monitor_id: monitor_id, persisted_monitor: ser.0,
- pending_monitors: Vec::new(),
- }
+ Ok(chain::ChannelMonitorUpdateStatus::Completed) => LatestMonitorState {
+ persisted_monitor_id: monitor_id,
+ persisted_monitor: ser.0,
+ pending_monitors: Vec::new(),
+ },
+ Ok(chain::ChannelMonitorUpdateStatus::InProgress) => {
+ panic!("The test currently doesn't test initial-persistence via the async pipeline")
},
- Ok(chain::ChannelMonitorUpdateStatus::InProgress) =>
- panic!("The test currently doesn't test initial-persistence via the async pipeline"),
Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(),
Err(()) => panic!(),
};
res
}
- fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
+ fn update_channel(
+ &self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate,
+ ) -> chain::ChannelMonitorUpdateStatus {
let mut map_lock = self.latest_monitors.lock().unwrap();
let map_entry = map_lock.get_mut(&funding_txo).expect("Didn't have monitor on update call");
- let latest_monitor_data = map_entry.pending_monitors.last().as_ref().map(|(_, data)| data).unwrap_or(&map_entry.persisted_monitor);
- let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
- read(&mut Cursor::new(&latest_monitor_data), (&*self.keys, &*self.keys)).unwrap().1;
- deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
+ let latest_monitor_data = map_entry
+ .pending_monitors
+ .last()
+ .as_ref()
+ .map(|(_, data)| data)
+ .unwrap_or(&map_entry.persisted_monitor);
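+ // Rebuild a monitor from the latest serialized copy and apply the update to it (panicking if it fails to apply) before forwarding the update to the underlying ChainMonitor.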
+ let deserialized_monitor =
+ <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
+ &mut Cursor::new(&latest_monitor_data),
+ (&*self.keys, &*self.keys),
+ )
+ .unwrap()
+ .1;
+ deserialized_monitor
+ .update_monitor(
+ update,
+ &&TestBroadcaster {},
+ &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) },
+ &self.logger,
+ )
+ .unwrap();
let mut ser = VecWriter(Vec::new());
deserialized_monitor.write(&mut ser).unwrap();
let res = self.chain_monitor.update_channel(funding_txo, update);
res
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(
+ &self,
+ ) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
struct KeyProvider {
node_secret: SecretKey,
rand_bytes_id: atomic::AtomicU32,
- enforcement_states: Mutex<HashMap<[u8;32], Arc<Mutex<EnforcementState>>>>,
+ enforcement_states: Mutex<HashMap<[u8; 32], Arc<Mutex<EnforcementState>>>>,
}
impl EntropySource for KeyProvider {
let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
#[rustfmt::skip]
let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_secret[31]];
- res[30-4..30].copy_from_slice(&id.to_le_bytes());
+ res[30 - 4..30].copy_from_slice(&id.to_le_bytes());
res
}
}
fn get_node_id(&self, recipient: Recipient) -> Result<PublicKey, ()> {
let node_secret = match recipient {
Recipient::Node => Ok(&self.node_secret),
- Recipient::PhantomNode => Err(())
+ Recipient::PhantomNode => Err(()),
}?;
Ok(PublicKey::from_secret_key(&Secp256k1::signing_only(), node_secret))
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+ fn ecdh(
+ &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>,
+ ) -> Result<SharedSecret, ()> {
let mut node_secret = match recipient {
Recipient::Node => Ok(self.node_secret.clone()),
- Recipient::PhantomNode => Err(())
+ Recipient::PhantomNode => Err(()),
}?;
if let Some(tweak) = tweak {
node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
KeyMaterial(random_bytes)
}
- fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result<RecoverableSignature, ()> {
+ fn sign_invoice(
+ &self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient,
+ ) -> Result<RecoverableSignature, ()> {
unreachable!()
}
fn sign_bolt12_invoice_request(
- &self, _invoice_request: &UnsignedInvoiceRequest
+ &self, _invoice_request: &UnsignedInvoiceRequest,
) -> Result<schnorr::Signature, ()> {
unreachable!()
}
unreachable!()
}
- fn sign_gossip_message(&self, msg: lightning::ln::msgs::UnsignedGossipMessage) -> Result<Signature, ()> {
+ fn sign_gossip_message(
+ &self, msg: lightning::ln::msgs::UnsignedGossipMessage,
+ ) -> Result<Signature, ()> {
let msg_hash = Message::from_digest(Sha256dHash::hash(&msg.encode()[..]).to_byte_array());
let secp_ctx = Secp256k1::signing_only();
Ok(secp_ctx.sign_ecdsa(&msg_hash, &self.node_secret))
#[cfg(taproot)]
type TaprootSigner = TestChannelSigner;
- fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
+ fn generate_channel_keys_id(
+ &self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128,
+ ) -> [u8; 32] {
let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8;
[id; 32]
}
- fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
+ fn derive_channel_signer(
+ &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32],
+ ) -> Self::EcdsaSigner {
let secp_ctx = Secp256k1::signing_only();
let id = channel_keys_id[0];
#[rustfmt::skip]
let secp_ctx = Secp256k1::signing_only();
#[rustfmt::skip]
let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_secret[31]]).unwrap();
- let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
- Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(our_channel_monitor_claim_key_hash).into_script())
+ let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(
+ &PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize(),
+ );
+ Ok(Builder::new()
+ .push_opcode(opcodes::all::OP_PUSHBYTES_0)
+ .push_slice(our_channel_monitor_claim_key_hash)
+ .into_script())
}
fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
let secp_ctx = Secp256k1::signing_only();
#[rustfmt::skip]
let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_secret[31]]).unwrap();
- let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
+ let pubkey_hash =
+ WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
Ok(ShutdownScript::new_p2wpkh(&pubkey_hash))
}
}
impl KeyProvider {
- fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
+ fn make_enforcement_state_cell(
+ &self, commitment_seed: [u8; 32],
+ ) -> Arc<Mutex<EnforcementState>> {
let mut revoked_commitments = self.enforcement_states.lock().unwrap();
if !revoked_commitments.contains_key(&commitment_seed) {
- revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new())));
+ revoked_commitments
+ .insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new())));
}
let cell = revoked_commitments.get(&commitment_seed).unwrap();
Arc::clone(cell)
APIError::MonitorUpdateInProgress => {
// We can (obviously) temp-fail a monitor update
},
- APIError::IncompatibleShutdownScript { .. } => panic!("Cannot send an incompatible shutdown script"),
+ APIError::IncompatibleShutdownScript { .. } => {
+ panic!("Cannot send an incompatible shutdown script")
+ },
}
}
#[inline]
fn check_payment_err(send_err: PaymentSendFailure, sendable_bounds_violated: bool) {
match send_err {
- PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err, sendable_bounds_violated),
+ PaymentSendFailure::ParameterError(api_err) => {
+ check_api_err(api_err, sendable_bounds_violated)
+ },
PaymentSendFailure::PathParameterError(per_path_results) => {
- for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
+ for res in per_path_results {
+ if let Err(api_err) = res {
+ check_api_err(api_err, sendable_bounds_violated);
+ }
+ }
},
PaymentSendFailure::AllFailedResendSafe(per_path_results) => {
- for api_err in per_path_results { check_api_err(api_err, sendable_bounds_violated); }
+ for api_err in per_path_results {
+ check_api_err(api_err, sendable_bounds_violated);
+ }
},
PaymentSendFailure::PartialFailure { results, .. } => {
- for res in results { if let Err(api_err) = res { check_api_err(api_err, sendable_bounds_violated); } }
+ for res in results {
+ if let Err(api_err) = res {
+ check_api_err(api_err, sendable_bounds_violated);
+ }
+ }
},
PaymentSendFailure::DuplicatePayment => panic!(),
}
}
-type ChanMan<'a> = ChannelManager<Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<FuzzEstimator>, &'a FuzzRouter, Arc<dyn Logger>>;
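+ // The fuzzer's concrete ChannelManager type, parameterized over the local test components (TestChainMonitor, KeyProvider, FuzzEstimator, FuzzRouter).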
+type ChanMan<'a> = ChannelManager<
+ Arc<TestChainMonitor>,
+ Arc<TestBroadcaster>,
+ Arc<KeyProvider>,
+ Arc<KeyProvider>,
+ Arc<KeyProvider>,
+ Arc<FuzzEstimator>,
+ &'a FuzzRouter,
+ Arc<dyn Logger>,
+>;
#[inline]
-fn get_payment_secret_hash(dest: &ChanMan, payment_id: &mut u8) -> Option<(PaymentSecret, PaymentHash)> {
+fn get_payment_secret_hash(
+ dest: &ChanMan, payment_id: &mut u8,
+) -> Option<(PaymentSecret, PaymentHash)> {
let mut payment_hash;
for _ in 0..256 {
payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).to_byte_array());
- if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) {
+ if let Ok(payment_secret) =
+ dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None)
+ {
return Some((payment_secret, payment_hash));
}
*payment_id = payment_id.wrapping_add(1);
}
#[inline]
-fn send_noret(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) {
+fn send_noret(
+ source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8,
+ payment_idx: &mut u64,
+) {
send_payment(source, dest, dest_chan_id, amt, payment_id, payment_idx);
}
#[inline]
-fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool {
+fn send_payment(
+ source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8,
+ payment_idx: &mut u64,
+) -> bool {
let (payment_secret, payment_hash) =
- if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
+ if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) {
+ (secret, hash)
+ } else {
+ return true;
+ };
let mut payment_id = [0; 32];
payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
*payment_idx += 1;
- let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
- .iter().find(|chan| chan.short_channel_id == Some(dest_chan_id))
- .map(|chan|
- (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
+ let (min_value_sendable, max_value_sendable) = source
+ .list_usable_channels()
+ .iter()
+ .find(|chan| chan.short_channel_id == Some(dest_chan_id))
+ .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
.unwrap_or((0, 0));
- if let Err(err) = source.send_payment_with_route(&Route {
- paths: vec![Path { hops: vec![RouteHop {
- pubkey: dest.get_our_node_id(),
- node_features: dest.node_features(),
- short_channel_id: dest_chan_id,
- channel_features: dest.channel_features(),
- fee_msat: amt,
- cltv_expiry_delta: 200,
- maybe_announced_channel: true,
- }], blinded_tail: None }],
- route_params: None,
- }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
+ if let Err(err) = source.send_payment_with_route(
+ &Route {
+ paths: vec![Path {
+ hops: vec![RouteHop {
+ pubkey: dest.get_our_node_id(),
+ node_features: dest.node_features(),
+ short_channel_id: dest_chan_id,
+ channel_features: dest.channel_features(),
+ fee_msat: amt,
+ cltv_expiry_delta: 200,
+ maybe_announced_channel: true,
+ }],
+ blinded_tail: None,
+ }],
+ route_params: None,
+ },
+ payment_hash,
+ RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_id),
+ ) {
check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
false
} else {
}
#[inline]
-fn send_hop_noret(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) {
- send_hop_payment(source, middle, middle_chan_id, dest, dest_chan_id, amt, payment_id, payment_idx);
+fn send_hop_noret(
+ source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64,
+ amt: u64, payment_id: &mut u8, payment_idx: &mut u64,
+) {
+ send_hop_payment(
+ source,
+ middle,
+ middle_chan_id,
+ dest,
+ dest_chan_id,
+ amt,
+ payment_id,
+ payment_idx,
+ );
}
#[inline]
-fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, payment_idx: &mut u64) -> bool {
+fn send_hop_payment(
+ source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64,
+ amt: u64, payment_id: &mut u8, payment_idx: &mut u64,
+) -> bool {
let (payment_secret, payment_hash) =
- if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
+ if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) {
+ (secret, hash)
+ } else {
+ return true;
+ };
let mut payment_id = [0; 32];
payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes());
*payment_idx += 1;
- let (min_value_sendable, max_value_sendable) = source.list_usable_channels()
- .iter().find(|chan| chan.short_channel_id == Some(middle_chan_id))
- .map(|chan|
- (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
+ let (min_value_sendable, max_value_sendable) = source
+ .list_usable_channels()
+ .iter()
+ .find(|chan| chan.short_channel_id == Some(middle_chan_id))
+ .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat))
.unwrap_or((0, 0));
let first_hop_fee = 50_000;
- if let Err(err) = source.send_payment_with_route(&Route {
- paths: vec![Path { hops: vec![RouteHop {
- pubkey: middle.get_our_node_id(),
- node_features: middle.node_features(),
- short_channel_id: middle_chan_id,
- channel_features: middle.channel_features(),
- fee_msat: first_hop_fee,
- cltv_expiry_delta: 100,
- maybe_announced_channel: true,
- }, RouteHop {
- pubkey: dest.get_our_node_id(),
- node_features: dest.node_features(),
- short_channel_id: dest_chan_id,
- channel_features: dest.channel_features(),
- fee_msat: amt,
- cltv_expiry_delta: 200,
- maybe_announced_channel: true,
- }], blinded_tail: None }],
- route_params: None,
- }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
+ if let Err(err) = source.send_payment_with_route(
+ &Route {
+ paths: vec![Path {
+ hops: vec![
+ RouteHop {
+ pubkey: middle.get_our_node_id(),
+ node_features: middle.node_features(),
+ short_channel_id: middle_chan_id,
+ channel_features: middle.channel_features(),
+ fee_msat: first_hop_fee,
+ cltv_expiry_delta: 100,
+ maybe_announced_channel: true,
+ },
+ RouteHop {
+ pubkey: dest.get_our_node_id(),
+ node_features: dest.node_features(),
+ short_channel_id: dest_chan_id,
+ channel_features: dest.channel_features(),
+ fee_msat: amt,
+ cltv_expiry_delta: 200,
+ maybe_announced_channel: true,
+ },
+ ],
+ blinded_tail: None,
+ }],
+ route_params: None,
+ },
+ payment_hash,
+ RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_id),
+ ) {
let sent_amt = amt + first_hop_fee;
check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
false
#[inline]
pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
let out = SearchingOutput::new(underlying_out);
- let broadcast = Arc::new(TestBroadcaster{});
+ let broadcast = Arc::new(TestBroadcaster {});
let router = FuzzRouter {};
macro_rules! make_node {
- ($node_id: expr, $fee_estimator: expr) => { {
- let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let node_secret = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, $node_id]).unwrap();
- let keys_manager = Arc::new(KeyProvider { node_secret, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(new_hash_map()) });
- let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+ ($node_id: expr, $fee_estimator: expr) => {{
+ let logger: Arc<dyn Logger> =
+ Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
+ let node_secret = SecretKey::from_slice(&[
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, $node_id,
+ ])
+ .unwrap();
+ let keys_manager = Arc::new(KeyProvider {
+ node_secret,
+ rand_bytes_id: atomic::AtomicU32::new(0),
+ enforcement_states: Mutex::new(new_hash_map()),
+ });
+ let monitor = Arc::new(TestChainMonitor::new(
+ broadcast.clone(),
+ logger.clone(),
+ $fee_estimator.clone(),
Arc::new(TestPersister {
- update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
- }), Arc::clone(&keys_manager)));
+ update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed),
+ }),
+ Arc::clone(&keys_manager),
+ ));
let mut config = UserConfig::default();
config.channel_config.forwarding_fee_proportional_millionths = 0;
}
let network = Network::Bitcoin;
let best_block_timestamp = genesis_block(network).header.time;
- let params = ChainParameters {
- network,
- best_block: BestBlock::from_network(network),
- };
- (ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), &router, Arc::clone(&logger), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), config, params, best_block_timestamp),
- monitor, keys_manager)
- } }
+ let params = ChainParameters { network, best_block: BestBlock::from_network(network) };
+ (
+ ChannelManager::new(
+ $fee_estimator.clone(),
+ monitor.clone(),
+ broadcast.clone(),
+ &router,
+ Arc::clone(&logger),
+ keys_manager.clone(),
+ keys_manager.clone(),
+ keys_manager.clone(),
+ config,
+ params,
+ best_block_timestamp,
+ ),
+ monitor,
+ keys_manager,
+ )
+ }};
}
macro_rules! reload_node {
- ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
- let keys_manager = Arc::clone(& $keys_manager);
- let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+ ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => {{
+ let keys_manager = Arc::clone(&$keys_manager);
+ let logger: Arc<dyn Logger> =
+ Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
+ let chain_monitor = Arc::new(TestChainMonitor::new(
+ broadcast.clone(),
+ logger.clone(),
+ $fee_estimator.clone(),
Arc::new(TestPersister {
- update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
- }), Arc::clone(& $keys_manager)));
+ update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed),
+ }),
+ Arc::clone(&$keys_manager),
+ ));
let mut config = UserConfig::default();
config.channel_config.forwarding_fee_proportional_millionths = 0;
let mut monitors = new_hash_map();
let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
for (outpoint, mut prev_state) in old_monitors.drain() {
- monitors.insert(outpoint, <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
- &mut Cursor::new(&prev_state.persisted_monitor), (&*$keys_manager, &*$keys_manager)
- ).expect("Failed to read monitor").1);
+ monitors.insert(
+ outpoint,
+ <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
+ &mut Cursor::new(&prev_state.persisted_monitor),
+ (&*$keys_manager, &*$keys_manager),
+ )
+ .expect("Failed to read monitor")
+ .1,
+ );
// Wipe any `ChannelMonitor`s which we never told LDK we finished persisting,
// considering them discarded. LDK should replay these for us as they're stored in
// the `ChannelManager`.
channel_monitors: monitor_refs,
};
- let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
+ let res = (
+ <(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args)
+ .expect("Failed to read manager")
+ .1,
+ chain_monitor.clone(),
+ );
for (funding_txo, mon) in monitors.drain() {
- assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
- Ok(ChannelMonitorUpdateStatus::Completed));
+ assert_eq!(
+ chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
+ Ok(ChannelMonitorUpdateStatus::Completed)
+ );
}
res
- } }
+ }};
}
let mut channel_txn = Vec::new();
macro_rules! make_channel {
- ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => { {
+ ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => {{
let init_dest = Init {
- features: $dest.init_features(), networks: None, remote_network_address: None
+ features: $dest.init_features(),
+ networks: None,
+ remote_network_address: None,
};
$source.peer_connected(&$dest.get_our_node_id(), &init_dest, true).unwrap();
let init_src = Init {
- features: $source.init_features(), networks: None, remote_network_address: None
+ features: $source.init_features(),
+ networks: None,
+ remote_network_address: None,
};
$dest.peer_connected(&$source.get_our_node_id(), &init_src, false).unwrap();
assert_eq!(events.len(), 1);
if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
msg.clone()
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
};
$dest.handle_open_channel(&$source.get_our_node_id(), &open_channel);
let events = $dest.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
if let events::Event::OpenChannelRequest {
- ref temporary_channel_id, ref counterparty_node_id, ..
- } = events[0] {
+ ref temporary_channel_id,
+ ref counterparty_node_id,
+ ..
+ } = events[0]
+ {
let mut random_bytes = [0u8; 16];
- random_bytes.copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]);
+ random_bytes
+ .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]);
let user_channel_id = u128::from_be_bytes(random_bytes);
- $dest.accept_inbound_channel(
- temporary_channel_id,
- counterparty_node_id,
- user_channel_id,
- ).unwrap();
- } else { panic!("Wrong event type"); }
+ $dest
+ .accept_inbound_channel(
+ temporary_channel_id,
+ counterparty_node_id,
+ user_channel_id,
+ )
+ .unwrap();
+ } else {
+ panic!("Wrong event type");
+ }
}
let events = $dest.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
msg.clone()
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
};
$source.handle_accept_channel(&$dest.get_our_node_id(), &accept_channel);
{
let events = $source.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
- if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
- let tx = Transaction { version: Version($chan_id), lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
- value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
- }]};
+ if let events::Event::FundingGenerationReady {
+ ref temporary_channel_id,
+ ref channel_value_satoshis,
+ ref output_script,
+ ..
+ } = events[0]
+ {
+ let tx = Transaction {
+ version: Version($chan_id),
+ lock_time: LockTime::ZERO,
+ input: Vec::new(),
+ output: vec![TxOut {
+ value: Amount::from_sat(*channel_value_satoshis),
+ script_pubkey: output_script.clone(),
+ }],
+ };
funding_output = OutPoint { txid: tx.txid(), index: 0 };
- $source.funding_transaction_generated(&temporary_channel_id, &$dest.get_our_node_id(), tx.clone()).unwrap();
+ $source
+ .funding_transaction_generated(
+ &temporary_channel_id,
+ &$dest.get_our_node_id(),
+ tx.clone(),
+ )
+ .unwrap();
channel_txn.push(tx);
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
}
let funding_created = {
assert_eq!(events.len(), 1);
if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
msg.clone()
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
};
$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);
assert_eq!(events.len(), 1);
if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
msg.clone()
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
};
let events = $dest.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
- if let events::Event::ChannelPending{ ref counterparty_node_id, .. } = events[0] {
+ if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] {
assert_eq!(counterparty_node_id, &$source.get_our_node_id());
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);
let events = $source.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
- if let events::Event::ChannelPending{ ref counterparty_node_id, .. } = events[0] {
+ if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] {
assert_eq!(counterparty_node_id, &$dest.get_our_node_id());
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
funding_output
- } }
+ }};
}
macro_rules! confirm_txn {
- ($node: expr) => { {
+ ($node: expr) => {{
let chain_hash = genesis_block(Network::Bitcoin).block_hash();
let mut header = create_dummy_header(chain_hash, 42);
- let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
+ let txdata: Vec<_> =
+ channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
$node.transactions_confirmed(&header, &txdata, 1);
for _ in 2..100 {
header = create_dummy_header(header.block_hash(), 42);
}
$node.best_block_updated(&header, 99);
- } }
+ }};
}
macro_rules! lock_fundings {
- ($nodes: expr) => { {
+ ($nodes: expr) => {{
let mut node_events = Vec::new();
for node in $nodes.iter() {
node_events.push(node.get_and_clear_pending_msg_events());
}
for (idx, node_event) in node_events.iter().enumerate() {
for event in node_event {
- if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event {
+ if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } =
+ event
+ {
for node in $nodes.iter() {
if node.get_our_node_id() == *node_id {
node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg);
}
}
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
}
}
let events = node.get_and_clear_pending_msg_events();
for event in events {
if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
- } else { panic!("Wrong event type"); }
+ } else {
+ panic!("Wrong event type");
+ }
}
}
- } }
+ }};
}
let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
- let mut last_htlc_clear_fee_a = 253;
+ let mut last_htlc_clear_fee_a = 253;
let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
- let mut last_htlc_clear_fee_b = 253;
+ let mut last_htlc_clear_fee_b = 253;
let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
- let mut last_htlc_clear_fee_c = 253;
+ let mut last_htlc_clear_fee_c = 253;
// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
// forwarding.
nodes[2].write(&mut node_c_ser).unwrap();
macro_rules! test_return {
- () => { {
+ () => {{
assert_eq!(nodes[0].list_channels().len(), 1);
assert_eq!(nodes[1].list_channels().len(), 2);
assert_eq!(nodes[2].list_channels().len(), 1);
return;
- } }
+ }};
}
let mut read_pos = 0;
macro_rules! get_slice {
- ($len: expr) => {
- {
- let slice_len = $len as usize;
- if data.len() < read_pos + slice_len {
- test_return!();
- }
- read_pos += slice_len;
- &data[read_pos - slice_len..read_pos]
+ ($len: expr) => {{
+ let slice_len = $len as usize;
+ if data.len() < read_pos + slice_len {
+ test_return!();
}
- }
+ read_pos += slice_len;
+ &data[read_pos - slice_len..read_pos]
+ }};
}
loop {
}
macro_rules! process_msg_noret {
- ($node: expr, $corrupt_forward: expr, $limit_events: expr) => { {
+ ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{
process_msg_events!($node, $corrupt_forward, $limit_events);
- } }
+ }};
}
macro_rules! drain_msg_events_on_disconnect {
- ($counterparty_id: expr) => { {
+ ($counterparty_id: expr) => {{
if $counterparty_id == 0 {
for event in nodes[0].get_and_clear_pending_msg_events() {
match event {
events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
},
- _ => if out.may_fail.load(atomic::Ordering::Acquire) {
- return;
- } else {
- panic!("Unhandled message event")
+ _ => {
+ if out.may_fail.load(atomic::Ordering::Acquire) {
+ return;
+ } else {
+ panic!("Unhandled message event")
+ }
},
}
}
- push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+ push_excess_b_events!(
+ nodes[1].get_and_clear_pending_msg_events().drain(..),
+ Some(0)
+ );
ab_events.clear();
ba_events.clear();
} else {
events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
},
- _ => if out.may_fail.load(atomic::Ordering::Acquire) {
- return;
- } else {
- panic!("Unhandled message event")
+ _ => {
+ if out.may_fail.load(atomic::Ordering::Acquire) {
+ return;
+ } else {
+ panic!("Unhandled message event")
+ }
},
}
}
- push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+ push_excess_b_events!(
+ nodes[1].get_and_clear_pending_msg_events().drain(..),
+ Some(2)
+ );
bc_events.clear();
cb_events.clear();
}
- } }
+ }};
}
macro_rules! process_events {
- ($node: expr, $fail: expr) => { {
+ ($node: expr, $fail: expr) => {{
// In case we get 256 payments we may have a hash collision, resulting in the
// second claim/fail call not finding the duplicate-hash HTLC, so we have to
// deduplicate the calls here.
if let events::Event::PaymentClaimable { .. } = a {
if let events::Event::PendingHTLCsForwardable { .. } = b {
Ordering::Less
- } else { Ordering::Equal }
+ } else {
+ Ordering::Equal
+ }
} else if let events::Event::PendingHTLCsForwardable { .. } = a {
if let events::Event::PaymentClaimable { .. } = b {
Ordering::Greater
- } else { Ordering::Equal }
- } else { Ordering::Equal }
+ } else {
+ Ordering::Equal
+ }
+ } else {
+ Ordering::Equal
+ }
});
let had_events = !events.is_empty();
for event in events.drain(..) {
events::Event::PaymentPathSuccessful { .. } => {},
events::Event::PaymentPathFailed { .. } => {},
events::Event::PaymentFailed { .. } => {},
- events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. } => {
+ events::Event::ProbeSuccessful { .. }
+ | events::Event::ProbeFailed { .. } => {
// Even though we don't explicitly send probes, because probes are
// detected based on hashing the payment hash+preimage, it's rather
// trivial for the fuzzer to build payments that accidentally end up
nodes[$node].process_pending_htlc_forwards();
},
events::Event::HTLCHandlingFailed { .. } => {},
- _ => if out.may_fail.load(atomic::Ordering::Acquire) {
- return;
- } else {
- panic!("Unhandled event")
+ _ => {
+ if out.may_fail.load(atomic::Ordering::Acquire) {
+ return;
+ } else {
+ panic!("Unhandled event")
+ }
},
}
}
had_events
- } }
+ }};
}
macro_rules! process_ev_noret {
- ($node: expr, $fail: expr) => { {
+ ($node: expr, $fail: expr) => {{
process_events!($node, $fail);
- } }
+ }};
}
let complete_first = |v: &mut Vec<_>| if !v.is_empty() { Some(v.remove(0)) } else { None };
let complete_second = |v: &mut Vec<_>| if v.len() > 1 { Some(v.remove(1)) } else { None };
- let complete_monitor_update = |
- monitor: &Arc<TestChainMonitor>, chan_funding,
- compl_selector: &dyn Fn(&mut Vec<(u64, Vec<u8>)>) -> Option<(u64, Vec<u8>)>,
- | {
- if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) {
- assert!(
- state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0),
- "updates should be sorted by id"
- );
- if let Some((id, data)) = compl_selector(&mut state.pending_monitors) {
- monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap();
- if id > state.persisted_monitor_id {
- state.persisted_monitor_id = id;
- state.persisted_monitor = data;
+ let complete_monitor_update =
+ |monitor: &Arc<TestChainMonitor>,
+ chan_funding,
+ compl_selector: &dyn Fn(&mut Vec<(u64, Vec<u8>)>) -> Option<(u64, Vec<u8>)>| {
+ if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) {
+ assert!(
+ state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0),
+ "updates should be sorted by id"
+ );
+ if let Some((id, data)) = compl_selector(&mut state.pending_monitors) {
+ monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap();
+ if id > state.persisted_monitor_id {
+ state.persisted_monitor_id = id;
+ state.persisted_monitor = data;
+ }
}
}
- }
- };
+ };
let complete_all_monitor_updates = |monitor: &Arc<TestChainMonitor>, chan_funding| {
if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) {
// In general, we keep related message groups close together in binary form, allowing
// bit-twiddling mutations to have similar effects. This is probably overkill, but no
// harm in doing so.
-
- 0x00 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
- 0x01 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
- 0x02 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
- 0x04 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
- 0x05 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
- 0x06 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+ 0x00 => {
+ *monitor_a.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::InProgress
+ },
+ 0x01 => {
+ *monitor_b.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::InProgress
+ },
+ 0x02 => {
+ *monitor_c.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::InProgress
+ },
+ 0x04 => {
+ *monitor_a.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::Completed
+ },
+ 0x05 => {
+ *monitor_b.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::Completed
+ },
+ 0x06 => {
+ *monitor_c.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::Completed
+ },
0x08 => complete_all_monitor_updates(&monitor_a, &chan_1_funding),
0x09 => complete_all_monitor_updates(&monitor_b, &chan_1_funding),
0x0e => {
if chan_a_disconnected {
let init_1 = Init {
- features: nodes[1].init_features(), networks: None, remote_network_address: None
+ features: nodes[1].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap();
let init_0 = Init {
- features: nodes[0].init_features(), networks: None, remote_network_address: None
+ features: nodes[0].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap();
chan_a_disconnected = false;
0x0f => {
if chan_b_disconnected {
let init_2 = Init {
- features: nodes[2].init_features(), networks: None, remote_network_address: None
+ features: nodes[2].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
let init_1 = Init {
- features: nodes[1].init_features(), networks: None, remote_network_address: None
+ features: nodes[1].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
chan_b_disconnected = false;
if !chan_a_disconnected {
nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
chan_a_disconnected = true;
- push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+ push_excess_b_events!(
+ nodes[1].get_and_clear_pending_msg_events().drain(..),
+ Some(0)
+ );
ab_events.clear();
ba_events.clear();
}
- let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
+ let (new_node_a, new_monitor_a) =
+ reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
nodes[0] = new_node_a;
monitor_a = new_monitor_a;
},
bc_events.clear();
cb_events.clear();
}
- let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
+ let (new_node_b, new_monitor_b) =
+ reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
nodes[1] = new_node_b;
monitor_b = new_monitor_b;
},
if !chan_b_disconnected {
nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
chan_b_disconnected = true;
- push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+ push_excess_b_events!(
+ nodes[1].get_and_clear_pending_msg_events().drain(..),
+ Some(2)
+ );
bc_events.clear();
cb_events.clear();
}
- let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
+ let (new_node_c, new_monitor_c) =
+ reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
nodes[2] = new_node_c;
monitor_c = new_monitor_c;
},
0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx),
0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx),
0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx),
- 0x34 => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx),
- 0x35 => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx),
+ 0x34 => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx,
+ ),
+ 0x35 => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx,
+ ),
0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_id, &mut p_idx),
0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx),
0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx),
0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_id, &mut p_idx),
- 0x3c => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx),
- 0x3d => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx),
+ 0x3c => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx,
+ ),
+ 0x3d => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx,
+ ),
0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_id, &mut p_idx),
0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx),
0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx),
0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_id, &mut p_idx),
- 0x44 => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx),
- 0x45 => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx),
+ 0x44 => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx,
+ ),
+ 0x45 => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx,
+ ),
0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000, &mut p_id, &mut p_idx),
0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx),
0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx),
0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_id, &mut p_idx),
- 0x4c => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx),
- 0x4d => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx),
+ 0x4c => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx,
+ ),
+ 0x4d => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx,
+ ),
0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_id, &mut p_idx),
0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx),
0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx),
0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_id, &mut p_idx),
- 0x54 => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx),
- 0x55 => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx),
+ 0x54 => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx,
+ ),
+ 0x55 => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx,
+ ),
0x58 => send_noret(&nodes[0], &nodes[1], chan_a, 100, &mut p_id, &mut p_idx),
0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_id, &mut p_idx),
0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_id, &mut p_idx),
0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_id, &mut p_idx),
- 0x5c => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_id, &mut p_idx),
- 0x5d => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_id, &mut p_idx),
+ 0x5c => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_id, &mut p_idx,
+ ),
+ 0x5d => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_id, &mut p_idx,
+ ),
0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_id, &mut p_idx),
0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_id, &mut p_idx),
0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, &mut p_id, &mut p_idx),
0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_id, &mut p_idx),
- 0x64 => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_id, &mut p_idx),
- 0x65 => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_id, &mut p_idx),
+ 0x64 => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_id, &mut p_idx,
+ ),
+ 0x65 => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_id, &mut p_idx,
+ ),
0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_id, &mut p_idx),
0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_id, &mut p_idx),
0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_id, &mut p_idx),
0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_id, &mut p_idx),
- 0x6c => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_id, &mut p_idx),
- 0x6d => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_id, &mut p_idx),
+ 0x6c => send_hop_noret(
+ &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_id, &mut p_idx,
+ ),
+ 0x6d => send_hop_noret(
+ &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_id, &mut p_idx,
+ ),
0x80 => {
let mut max_feerate = last_htlc_clear_fee_a;
}
nodes[0].maybe_update_chan_fees();
},
- 0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },
+ 0x81 => {
+ fee_est_a.ret_val.store(253, atomic::Ordering::Release);
+ nodes[0].maybe_update_chan_fees();
+ },
0x84 => {
let mut max_feerate = last_htlc_clear_fee_b;
}
nodes[1].maybe_update_chan_fees();
},
- 0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },
+ 0x85 => {
+ fee_est_b.ret_val.store(253, atomic::Ordering::Release);
+ nodes[1].maybe_update_chan_fees();
+ },
0x88 => {
let mut max_feerate = last_htlc_clear_fee_c;
}
nodes[2].maybe_update_chan_fees();
},
- 0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
+ 0x89 => {
+ fee_est_c.ret_val.store(253, atomic::Ordering::Release);
+ nodes[2].maybe_update_chan_fees();
+ },
0xf0 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_first),
0xf1 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_second),
// after we resolve all pending events.
// First make sure there are no pending monitor updates and further update
// operations complete.
- *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
- *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
- *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+ *monitor_a.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::Completed;
+ *monitor_b.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::Completed;
+ *monitor_c.persister.update_ret.lock().unwrap() =
+ ChannelMonitorUpdateStatus::Completed;
complete_all_monitor_updates(&monitor_a, &chan_1_funding);
complete_all_monitor_updates(&monitor_b, &chan_1_funding);
// Next, make sure peers are all connected to each other
if chan_a_disconnected {
let init_1 = Init {
- features: nodes[1].init_features(), networks: None, remote_network_address: None
+ features: nodes[1].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap();
let init_0 = Init {
- features: nodes[0].init_features(), networks: None, remote_network_address: None
+ features: nodes[0].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap();
chan_a_disconnected = false;
}
if chan_b_disconnected {
let init_2 = Init {
- features: nodes[2].init_features(), networks: None, remote_network_address: None
+ features: nodes[2].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
let init_1 = Init {
- features: nodes[1].init_features(), networks: None, remote_network_address: None
+ features: nodes[1].init_features(),
+ networks: None,
+ remote_network_address: None,
};
nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
chan_b_disconnected = false;
}
for i in 0..std::usize::MAX {
- if i == 100 { panic!("It may take may iterations to settle the state, but it should not take forever"); }
+ if i == 100 {
+ panic!("It may take many iterations to settle the state, but it should not take forever");
+ }
// Then, make sure any current forwards make their way to their destination
- if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; }
- if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; }
- if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; }
+ if process_msg_events!(0, false, ProcessMessages::AllMessages) {
+ continue;
+ }
+ if process_msg_events!(1, false, ProcessMessages::AllMessages) {
+ continue;
+ }
+ if process_msg_events!(2, false, ProcessMessages::AllMessages) {
+ continue;
+ }
// ...making sure any pending PendingHTLCsForwardable events are handled and
// payments claimed.
- if process_events!(0, false) { continue; }
- if process_events!(1, false) { continue; }
- if process_events!(2, false) { continue; }
+ if process_events!(0, false) {
+ continue;
+ }
+ if process_events!(1, false) {
+ continue;
+ }
+ if process_events!(2, false) {
+ continue;
+ }
break;
}
// Finally, make sure that at least one end of each channel can make a substantial payment
assert!(
- send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx) ||
- send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx));
+ send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx)
+ || send_payment(
+ &nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx
+ )
+ );
assert!(
- send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx) ||
- send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx));
+ send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx)
+ || send_payment(
+ &nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx
+ )
+ );
last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
#[no_mangle]
pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
- do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{}, false);
- do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{}, true);
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, false);
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, true);
}