use bitcoin::hash_types::{BlockHash, WPubkeyHash};
use lightning::chain;
-use lightning::chain::Confirm;
-use lightning::chain::chainmonitor;
-use lightning::chain::channelmonitor;
-use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
+use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
+use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
-use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, ErrorAction, UpdateAddHTLC, Init};
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER};
+use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
+use lightning::ln::script::ShutdownScript;
+use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::errors::APIError;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
-use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
+use lightning::util::events::MessageSendEventsProvider;
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use lightning::util::test_utils::OnlyReadsKeysInterface;
use lightning::routing::router::{Route, RouteHop};
use bitcoin::secp256k1::Secp256k1;
use std::mem;
-use std::cmp::Ordering;
+use std::cmp::{self, Ordering};
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;
-struct FuzzEstimator {}
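+// Hard cap on every feerate the fuzzer returns: used directly as the HighPriority
+// estimate and as an upper bound on the fuzzer-driven Normal estimate below.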
+const MAX_FEE: u32 = 10_000;
+struct FuzzEstimator {
+ ret_val: atomic::AtomicU32,
+}
impl FeeEstimator for FuzzEstimator {
- fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
- 253
+ fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
+ // We force-close channels if our counterparty sends us a feerate which is a small multiple
+ // of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
+ // always return a HighPriority feerate here which is >= the maximum Normal feerate and a
+ // Background feerate which is <= the minimum Normal feerate.
+ match conf_target {
+ ConfirmationTarget::HighPriority => MAX_FEE,
+ ConfirmationTarget::Background => 253,
+ ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
+ }
}
}
self.0.extend_from_slice(buf);
Ok(())
}
- fn size_hint(&mut self, size: usize) {
- self.0.reserve_exact(size);
- }
}
struct TestChainMonitor {
pub logger: Arc<dyn Logger>,
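+ // `keys` is needed to deserialize monitors on update; the shared `persister` is how
+ // the fuzzer injects monitor-update failures.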
+ pub keys: Arc<KeyProvider>,
+ pub persister: Arc<TestPersister>,
pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
- pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
// logic will automatically force-close our channels for us (as we don't have an up-to-date
// monitor implying we are not able to punish misbehaving counterparties). Because this test
pub should_update_manager: atomic::AtomicBool,
}
impl TestChainMonitor {
- pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>) -> Self {
+ pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
Self {
- chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)),
+ chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))),
logger,
- update_ret: Mutex::new(Ok(())),
+ keys,
+ persister,
latest_monitors: Mutex::new(HashMap::new()),
should_update_manager: atomic::AtomicBool::new(false),
}
}
}
impl chain::Watch<EnforcingSigner> for TestChainMonitor {
- fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
let mut ser = VecWriter(Vec::new());
monitor.write(&mut ser).unwrap();
if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
panic!("Already had monitor pre-watch_channel");
}
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
- assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
- self.update_ret.lock().unwrap().clone()
+ self.chain_monitor.watch_channel(funding_txo, monitor)
}
- fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
let mut map_lock = self.latest_monitors.lock().unwrap();
let mut map_entry = match map_lock.entry(funding_txo) {
hash_map::Entry::Occupied(entry) => entry,
hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
};
let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
- read(&mut Cursor::new(&map_entry.get().1), &OnlyReadsKeysInterface {}).unwrap().1;
- deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
+ read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
+ deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
let mut ser = VecWriter(Vec::new());
deserialized_monitor.write(&mut ser).unwrap();
map_entry.insert((update.update_id, ser.0));
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
- self.update_ret.lock().unwrap().clone()
+ self.chain_monitor.update_channel(funding_txo, update)
}
fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
struct KeyProvider {
node_id: u8,
- rand_bytes_id: atomic::AtomicU8,
- revoked_commitments: Mutex<HashMap<[u8;32], Arc<Mutex<u64>>>>,
+ rand_bytes_id: atomic::AtomicU32,
+ enforcement_states: Mutex<HashMap<[u8;32], Arc<Mutex<EnforcementState>>>>,
}
impl KeysInterface for KeyProvider {
type Signer = EnforcingSigner;
Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
}
- fn get_shutdown_pubkey(&self) -> PublicKey {
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
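+ // Derive a fixed per-node P2WPKH shutdown script so results stay deterministic across runs.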
let secp_ctx = Secp256k1::signing_only();
- PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
+ let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap();
+ let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize());
+ ShutdownScript::new_p2wpkh(&pubkey_hash)
}
fn get_channel_signer(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner {
SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
- [id, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
+ [id as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
channel_value_satoshis,
[0; 32],
);
- let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed);
+ let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
}
fn get_secure_random_bytes(&self) -> [u8; 32] {
let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
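+ // The counter is now 32-bit; pack it little-endian into bytes 26..30 so each call
+ // still yields unique bytes even in long fuzz runs.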
+ let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_id];
+ res[30-4..30].copy_from_slice(&id.to_le_bytes());
+ res
}
fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
let mut reader = std::io::Cursor::new(buffer);
let inner: InMemorySigner = Readable::read(&mut reader)?;
- let revoked_commitment = self.make_revoked_commitment_cell(inner.commitment_seed);
-
- let last_commitment_number = Readable::read(&mut reader)?;
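+ // Enforcement state now lives in the shared per-commitment-seed cell rather than
+ // being serialized alongside the signer.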
+ let state = self.make_enforcement_state_cell(inner.commitment_seed);
Ok(EnforcingSigner {
inner,
- last_commitment_number: Arc::new(Mutex::new(last_commitment_number)),
- revoked_commitment,
+ state,
disable_revocation_policy_check: false,
})
}
}
impl KeyProvider {
- fn make_revoked_commitment_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<u64>> {
- let mut revoked_commitments = self.revoked_commitments.lock().unwrap();
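+ // Keying by commitment seed lets a signer recreated after a simulated restart share
+ // enforcement state with its pre-restart incarnation.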
+ fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
+ let mut revoked_commitments = self.enforcement_states.lock().unwrap();
if !revoked_commitments.contains_key(&commitment_seed) {
- revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(INITIAL_REVOKED_COMMITMENT_NUMBER)));
+ revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(EnforcementState::new())));
}
let cell = revoked_commitments.get(&commitment_seed).unwrap();
Arc::clone(cell)
_ if err.starts_with("Cannot push more than their max accepted HTLCs ") => {},
_ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept ") => {},
_ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
+ _ if err.starts_with("Cannot send value that would put counterparty balance under holder-announced channel reserve value") => {},
_ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
_ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
+ _ if err.starts_with("Cannot send value that would put our exposure to dust HTLCs at") => {},
_ => panic!("{}", err),
}
},
APIError::MonitorUpdateFailed => {
// We can (obviously) temp-fail a monitor update
},
+ APIError::IncompatibleShutdownScript { .. } => panic!("Cannot send an incompatible shutdown script"),
}
}
#[inline]
let mut payment_hash;
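+ // create_inbound_payment_for_hash refuses a payment hash that is already pending, so
+ // scan up to 256 payment ids to find a usable one.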
for _ in 0..256 {
payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).into_inner());
- if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 7200, 0) {
+ if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600, 0) {
return Some((payment_secret, payment_hash));
}
*payment_id = payment_id.wrapping_add(1);
#[inline]
pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
- let fee_est = Arc::new(FuzzEstimator{});
let broadcast = Arc::new(TestBroadcaster{});
macro_rules! make_node {
- ($node_id: expr) => { {
+ ($node_id: expr, $fee_estimator: expr) => { {
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));
- let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0), revoked_commitments: Mutex::new(HashMap::new()) });
+ let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
+ let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+ Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));
let mut config = UserConfig::default();
- config.channel_options.fee_proportional_millionths = 0;
+ config.channel_options.forwarding_fee_proportional_millionths = 0;
config.channel_options.announced_channel = true;
- config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
let network = Network::Bitcoin;
let params = ChainParameters {
network,
best_block: BestBlock::from_genesis(network),
};
- (ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
+ (ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
monitor, keys_manager)
} }
}
macro_rules! reload_node {
- ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { {
+ ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
let keys_manager = Arc::clone(& $keys_manager);
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));
+ let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+ Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));
let mut config = UserConfig::default();
- config.channel_options.fee_proportional_millionths = 0;
+ config.channel_options.forwarding_fee_proportional_millionths = 0;
config.channel_options.announced_channel = true;
- config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
let mut monitors = HashMap::new();
let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
- monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &OnlyReadsKeysInterface {}).expect("Failed to read monitor").1);
+ monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &*$keys_manager).expect("Failed to read monitor").1);
chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
}
let mut monitor_refs = HashMap::new();
let read_args = ChannelManagerReadArgs {
keys_manager,
- fee_estimator: fee_est.clone(),
+ fee_estimator: $fee_estimator.clone(),
chain_monitor: chain_monitor.clone(),
tx_broadcaster: broadcast.clone(),
logger,
channel_monitors: monitor_refs,
};
- (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
+ let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
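+ // Hand every deserialized monitor back to the fresh chain monitor so it is actually
+ // watched after the simulated restart.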
+ for (funding_txo, mon) in monitors.drain() {
+ assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
+ }
+ res
} }
}
let mut channel_txn = Vec::new();
macro_rules! make_channel {
($source: expr, $dest: expr, $chan_id: expr) => { {
+ $source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known() });
+ $dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known() });
+
$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
let open_channel = {
let events = $source.get_and_clear_pending_msg_events();
} }
}
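+ // Each node gets its own fuzzer-driven fee estimator; last_htlc_clear_fee_* records the
+ // Normal feerate in effect when 0xff last settled all HTLCs, which bounds how far the
+ // 0x80/0x84/0x88 opcodes may bump fees.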
+ let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+ let mut last_htlc_clear_fee_a = 253;
+ let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+ let mut last_htlc_clear_fee_b = 253;
+ let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
+ let mut last_htlc_clear_fee_c = 253;
+
// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
// forwarding.
- let (node_a, mut monitor_a, keys_manager_a) = make_node!(0);
- let (node_b, mut monitor_b, keys_manager_b) = make_node!(1);
- let (node_c, mut monitor_c, keys_manager_c) = make_node!(2);
+ let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a);
+ let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b);
+ let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c);
let mut nodes = [node_a, node_b, node_c];
let mut chan_a_disconnected = false;
let mut chan_b_disconnected = false;
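+ // Undelivered message queues: ab_events holds messages from node A, ba_events/bc_events
+ // messages from node B towards A/C, and cb_events messages from node C.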
+ let mut ab_events = Vec::new();
let mut ba_events = Vec::new();
let mut bc_events = Vec::new();
+ let mut cb_events = Vec::new();
let mut node_a_ser = VecWriter(Vec::new());
nodes[0].write(&mut node_a_ser).unwrap();
}
loop {
+ // Push any events from Node B onto ba_events and bc_events
+ macro_rules! push_excess_b_events {
+ ($excess_events: expr, $expect_drop_node: expr) => { {
+ let a_id = nodes[0].get_our_node_id();
+ let expect_drop_node: Option<usize> = $expect_drop_node;
+ let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None };
+ for event in $excess_events {
+ let push_a = match event {
+ events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
+ if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
+ *node_id == a_id
+ },
+ events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
+ if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
+ *node_id == a_id
+ },
+ events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
+ if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
+ *node_id == a_id
+ },
+ events::MessageSendEvent::SendFundingLocked { .. } => continue,
+ events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
+ events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+ if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
+ *node_id == a_id
+ },
+ _ => panic!("Unhandled message event {:?}", event),
+ };
+ if push_a { ba_events.push(event); } else { bc_events.push(event); }
+ }
+ } }
+ }
+
+ // While delivering messages, we select across three possible message selection processes
+ // to ensure we get as much coverage as possible. See the individual enum variants for more
+ // details.
+ #[derive(PartialEq)]
+ enum ProcessMessages {
+ /// Deliver all available messages, including fetching any new messages from
+ /// `get_and_clear_pending_msg_events()` (which may have side effects).
+ AllMessages,
+ /// Call `get_and_clear_pending_msg_events()` first, and then deliver up to one
+ /// message (which may already be queued).
+ OneMessage,
+ /// Deliver up to one already-queued message. This avoids any potential side-effects
+ /// of `get_and_clear_pending_msg_events()` (eg freeing the HTLC holding cell), which
+ /// potentially provides more coverage.
+ OnePendingMessage,
+ }
+
macro_rules! process_msg_events {
- ($node: expr, $corrupt_forward: expr) => { {
- let events = if $node == 1 {
+ ($node: expr, $corrupt_forward: expr, $limit_events: expr) => { {
+ let mut events = if $node == 1 {
let mut new_events = Vec::new();
mem::swap(&mut new_events, &mut ba_events);
new_events.extend_from_slice(&bc_events[..]);
bc_events.clear();
new_events
- } else { Vec::new() };
+ } else if $node == 0 {
+ let mut new_events = Vec::new();
+ mem::swap(&mut new_events, &mut ab_events);
+ new_events
+ } else {
+ let mut new_events = Vec::new();
+ mem::swap(&mut new_events, &mut cb_events);
+ new_events
+ };
+ let mut new_events = Vec::new();
+ if $limit_events != ProcessMessages::OnePendingMessage {
+ new_events = nodes[$node].get_and_clear_pending_msg_events();
+ }
let mut had_events = false;
- for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) {
+ let mut events_iter = events.drain(..).chain(new_events.drain(..));
+ let mut extra_ev = None;
+ for event in &mut events_iter {
had_events = true;
match event {
- events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
- for dest in nodes.iter() {
- if dest.get_our_node_id() == *node_id {
- assert!(update_fee.is_none());
- for update_add in update_add_htlcs {
+ events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
+ for (idx, dest) in nodes.iter().enumerate() {
+ if dest.get_our_node_id() == node_id {
+ for update_add in update_add_htlcs.iter() {
+ out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
if !$corrupt_forward {
- dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add);
+ dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
} else {
// Corrupt the update_add_htlc message so that its HMAC
// check will fail and we generate a
dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
}
}
- for update_fulfill in update_fulfill_htlcs {
- dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill);
+ for update_fulfill in update_fulfill_htlcs.iter() {
+ out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
+ dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
}
- for update_fail in update_fail_htlcs {
- dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail);
+ for update_fail in update_fail_htlcs.iter() {
+ out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
+ dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
}
- for update_fail_malformed in update_fail_malformed_htlcs {
- dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed);
+ for update_fail_malformed in update_fail_malformed_htlcs.iter() {
+ out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
+ dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
}
+ if let Some(msg) = update_fee {
+ out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
+ dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
+ }
+ let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
+ !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
+ if $limit_events != ProcessMessages::AllMessages && processed_change {
+ // If we only want to process some messages, don't deliver the CS until later.
+ extra_ev = Some(events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed
+ } });
+ break;
+ }
+ out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
+ break;
}
}
},
events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- for dest in nodes.iter() {
+ for (idx, dest) in nodes.iter().enumerate() {
if dest.get_our_node_id() == *node_id {
+ out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
}
}
},
events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
- for dest in nodes.iter() {
+ for (idx, dest) in nodes.iter().enumerate() {
if dest.get_our_node_id() == *node_id {
+ out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
}
}
events::MessageSendEvent::SendFundingLocked { .. } => {
// Can be generated as a reestablish response
},
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
- // Can be generated due to a payment forward being rejected due to a
- // channel having previously failed a monitor update
+ events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
+ // Can be generated as a reestablish response
+ },
+ events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+ // When we reconnect we will resend a channel_update to make sure our
+ // counterparty has the latest parameters for receiving payments
+ // through us. We do, however, check that the message does not include
+ // the "disabled" bit, as we should never ever have a channel which is
+ // disabled when we send such an update (or it may indicate channel
+ // force-close which we should detect as an error).
+ assert_eq!(msg.contents.flags & 2, 0);
},
- _ => panic!("Unhandled message event"),
+ _ => panic!("Unhandled message event {:?}", event),
}
+ if $limit_events != ProcessMessages::AllMessages {
+ break;
+ }
+ }
+ if $node == 1 {
+ push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None);
+ } else if $node == 0 {
+ if let Some(ev) = extra_ev { ab_events.push(ev); }
+ for event in events_iter { ab_events.push(event); }
+ } else {
+ if let Some(ev) = extra_ev { cb_events.push(ev); }
+ for event in events_iter { cb_events.push(event); }
}
had_events
} }
events::MessageSendEvent::SendRevokeAndACK { .. } => {},
events::MessageSendEvent::SendChannelReestablish { .. } => {},
events::MessageSendEvent::SendFundingLocked { .. } => {},
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
- events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
+ events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
+ events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+ assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+ },
_ => panic!("Unhandled message event"),
}
}
+ push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+ ab_events.clear();
ba_events.clear();
} else {
for event in nodes[2].get_and_clear_pending_msg_events() {
events::MessageSendEvent::SendRevokeAndACK { .. } => {},
events::MessageSendEvent::SendChannelReestablish { .. } => {},
events::MessageSendEvent::SendFundingLocked { .. } => {},
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
- events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
+ events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
+ events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+ assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+ },
_ => panic!("Unhandled message event"),
}
}
+ push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
bc_events.clear();
- }
- let mut events = nodes[1].get_and_clear_pending_msg_events();
- let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
- let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
- for event in events.drain(..) {
- let push = match event {
- events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
- if *node_id != drop_node_id { true } else { false }
- },
- events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
- if *node_id != drop_node_id { true } else { false }
- },
- events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
- if *node_id != drop_node_id { true } else { false }
- },
- events::MessageSendEvent::SendFundingLocked { .. } => false,
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
- events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
- _ => panic!("Unhandled message event"),
- };
- if push { msg_sink.push(event); }
+ cb_events.clear();
}
} }
}
}
},
events::Event::PaymentSent { .. } => {},
- events::Event::PaymentFailed { .. } => {},
+ events::Event::PaymentPathFailed { .. } => {},
+ events::Event::PaymentForwarded { .. } if $node == 1 => {},
events::Event::PendingHTLCsForwardable { .. } => {
nodes[$node].process_pending_htlc_forwards();
},
} }
}
- match get_slice!(1)[0] {
+ let v = get_slice!(1)[0];
+ out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes());
+ match v {
// In general, we keep related message groups close together in binary form, allowing
// bit-twiddling mutations to have similar effects. This is probably overkill, but no
// harm in doing so.
- 0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x04 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
- 0x05 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
- 0x06 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
+ 0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
+ 0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
+ 0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),
0x08 => {
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[0].process_monitor_events();
}
},
0x09 => {
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[1].process_monitor_events();
}
},
0x0a => {
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[1].process_monitor_events();
}
},
0x0b => {
if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[2].process_monitor_events();
}
},
}
},
- 0x10 => { process_msg_events!(0, true); },
- 0x11 => { process_msg_events!(0, false); },
- 0x12 => { process_events!(0, true); },
- 0x13 => { process_events!(0, false); },
- 0x14 => { process_msg_events!(1, true); },
- 0x15 => { process_msg_events!(1, false); },
- 0x16 => { process_events!(1, true); },
- 0x17 => { process_events!(1, false); },
- 0x18 => { process_msg_events!(2, true); },
- 0x19 => { process_msg_events!(2, false); },
- 0x1a => { process_events!(2, true); },
- 0x1b => { process_events!(2, false); },
-
- 0x1c => {
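+ // Message-processing opcodes now come in (corrupt, clean) pairs for each ProcessMessages
+ // mode, followed by the event-processing pair, for each node in turn.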
+ 0x10 => { process_msg_events!(0, true, ProcessMessages::AllMessages); },
+ 0x11 => { process_msg_events!(0, false, ProcessMessages::AllMessages); },
+ 0x12 => { process_msg_events!(0, true, ProcessMessages::OneMessage); },
+ 0x13 => { process_msg_events!(0, false, ProcessMessages::OneMessage); },
+ 0x14 => { process_msg_events!(0, true, ProcessMessages::OnePendingMessage); },
+ 0x15 => { process_msg_events!(0, false, ProcessMessages::OnePendingMessage); },
+
+ 0x16 => { process_events!(0, true); },
+ 0x17 => { process_events!(0, false); },
+
+ 0x18 => { process_msg_events!(1, true, ProcessMessages::AllMessages); },
+ 0x19 => { process_msg_events!(1, false, ProcessMessages::AllMessages); },
+ 0x1a => { process_msg_events!(1, true, ProcessMessages::OneMessage); },
+ 0x1b => { process_msg_events!(1, false, ProcessMessages::OneMessage); },
+ 0x1c => { process_msg_events!(1, true, ProcessMessages::OnePendingMessage); },
+ 0x1d => { process_msg_events!(1, false, ProcessMessages::OnePendingMessage); },
+
+ 0x1e => { process_events!(1, true); },
+ 0x1f => { process_events!(1, false); },
+
+ 0x20 => { process_msg_events!(2, true, ProcessMessages::AllMessages); },
+ 0x21 => { process_msg_events!(2, false, ProcessMessages::AllMessages); },
+ 0x22 => { process_msg_events!(2, true, ProcessMessages::OneMessage); },
+ 0x23 => { process_msg_events!(2, false, ProcessMessages::OneMessage); },
+ 0x24 => { process_msg_events!(2, true, ProcessMessages::OnePendingMessage); },
+ 0x25 => { process_msg_events!(2, false, ProcessMessages::OnePendingMessage); },
+
+ 0x26 => { process_events!(2, true); },
+ 0x27 => { process_events!(2, false); },
+
+ 0x2c => {
if !chan_a_disconnected {
nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
chan_a_disconnected = true;
drain_msg_events_on_disconnect!(0);
}
- let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a);
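+ // If monitors advanced since node A was last serialized, refresh the serialization or
+ // the reload below would needlessly force-close channels.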
+ if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
+ node_a_ser.0.clear();
+ nodes[0].write(&mut node_a_ser).unwrap();
+ }
+ let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
nodes[0] = new_node_a;
monitor_a = new_monitor_a;
},
- 0x1d => {
+ 0x2d => {
if !chan_a_disconnected {
nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
chan_a_disconnected = true;
nodes[0].get_and_clear_pending_msg_events();
+ ab_events.clear();
ba_events.clear();
}
if !chan_b_disconnected {
chan_b_disconnected = true;
nodes[2].get_and_clear_pending_msg_events();
bc_events.clear();
+ cb_events.clear();
}
- let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b);
+ let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
nodes[1] = new_node_b;
monitor_b = new_monitor_b;
},
- 0x1e => {
+ 0x2e => {
if !chan_b_disconnected {
nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
chan_b_disconnected = true;
drain_msg_events_on_disconnect!(2);
}
- let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c);
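+ // As for node A: refresh node C's serialization if its monitors advanced.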
+ if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
+ node_c_ser.0.clear();
+ nodes[2].write(&mut node_c_ser).unwrap();
+ }
+ let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
nodes[2] = new_node_c;
monitor_c = new_monitor_c;
},
// 1/10th the channel size:
- 0x20 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id); },
- 0x21 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id); },
- 0x22 => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id); },
- 0x23 => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id); },
- 0x24 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut payment_id); },
- 0x25 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut payment_id); },
-
- 0x28 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut payment_id); },
- 0x29 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut payment_id); },
- 0x2a => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut payment_id); },
- 0x2b => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut payment_id); },
- 0x2c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut payment_id); },
- 0x2d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut payment_id); },
-
- 0x30 => { send_payment(&nodes[0], &nodes[1], chan_a, 100_000, &mut payment_id); },
- 0x31 => { send_payment(&nodes[1], &nodes[0], chan_a, 100_000, &mut payment_id); },
- 0x32 => { send_payment(&nodes[1], &nodes[2], chan_b, 100_000, &mut payment_id); },
- 0x33 => { send_payment(&nodes[2], &nodes[1], chan_b, 100_000, &mut payment_id); },
- 0x34 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut payment_id); },
- 0x35 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut payment_id); },
-
- 0x38 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000, &mut payment_id); },
- 0x39 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000, &mut payment_id); },
- 0x3a => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000, &mut payment_id); },
- 0x3b => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000, &mut payment_id); },
- 0x3c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut payment_id); },
- 0x3d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut payment_id); },
-
- 0x40 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000, &mut payment_id); },
- 0x41 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000, &mut payment_id); },
- 0x42 => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000, &mut payment_id); },
- 0x43 => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000, &mut payment_id); },
- 0x44 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut payment_id); },
- 0x45 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut payment_id); },
-
- 0x48 => { send_payment(&nodes[0], &nodes[1], chan_a, 100, &mut payment_id); },
- 0x49 => { send_payment(&nodes[1], &nodes[0], chan_a, 100, &mut payment_id); },
- 0x4a => { send_payment(&nodes[1], &nodes[2], chan_b, 100, &mut payment_id); },
- 0x4b => { send_payment(&nodes[2], &nodes[1], chan_b, 100, &mut payment_id); },
- 0x4c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut payment_id); },
- 0x4d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut payment_id); },
-
- 0x50 => { send_payment(&nodes[0], &nodes[1], chan_a, 10, &mut payment_id); },
- 0x51 => { send_payment(&nodes[1], &nodes[0], chan_a, 10, &mut payment_id); },
- 0x52 => { send_payment(&nodes[1], &nodes[2], chan_b, 10, &mut payment_id); },
- 0x53 => { send_payment(&nodes[2], &nodes[1], chan_b, 10, &mut payment_id); },
- 0x54 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut payment_id); },
- 0x55 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut payment_id); },
-
- 0x58 => { send_payment(&nodes[0], &nodes[1], chan_a, 1, &mut payment_id); },
- 0x59 => { send_payment(&nodes[1], &nodes[0], chan_a, 1, &mut payment_id); },
- 0x5a => { send_payment(&nodes[1], &nodes[2], chan_b, 1, &mut payment_id); },
- 0x5b => { send_payment(&nodes[2], &nodes[1], chan_b, 1, &mut payment_id); },
- 0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
- 0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },
+ 0x30 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id); },
+ 0x31 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id); },
+ 0x32 => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id); },
+ 0x33 => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id); },
+ 0x34 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut payment_id); },
+ 0x35 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut payment_id); },
+
+ 0x38 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut payment_id); },
+ 0x39 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut payment_id); },
+ 0x3a => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut payment_id); },
+ 0x3b => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut payment_id); },
+ 0x3c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut payment_id); },
+ 0x3d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut payment_id); },
+
+ 0x40 => { send_payment(&nodes[0], &nodes[1], chan_a, 100_000, &mut payment_id); },
+ 0x41 => { send_payment(&nodes[1], &nodes[0], chan_a, 100_000, &mut payment_id); },
+ 0x42 => { send_payment(&nodes[1], &nodes[2], chan_b, 100_000, &mut payment_id); },
+ 0x43 => { send_payment(&nodes[2], &nodes[1], chan_b, 100_000, &mut payment_id); },
+ 0x44 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut payment_id); },
+ 0x45 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut payment_id); },
+
+ 0x48 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000, &mut payment_id); },
+ 0x49 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000, &mut payment_id); },
+ 0x4a => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000, &mut payment_id); },
+ 0x4b => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000, &mut payment_id); },
+ 0x4c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut payment_id); },
+ 0x4d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut payment_id); },
+
+ 0x50 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000, &mut payment_id); },
+ 0x51 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000, &mut payment_id); },
+ 0x52 => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000, &mut payment_id); },
+ 0x53 => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000, &mut payment_id); },
+ 0x54 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut payment_id); },
+ 0x55 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut payment_id); },
+
+ 0x58 => { send_payment(&nodes[0], &nodes[1], chan_a, 100, &mut payment_id); },
+ 0x59 => { send_payment(&nodes[1], &nodes[0], chan_a, 100, &mut payment_id); },
+ 0x5a => { send_payment(&nodes[1], &nodes[2], chan_b, 100, &mut payment_id); },
+ 0x5b => { send_payment(&nodes[2], &nodes[1], chan_b, 100, &mut payment_id); },
+ 0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut payment_id); },
+ 0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut payment_id); },
+
+ 0x60 => { send_payment(&nodes[0], &nodes[1], chan_a, 10, &mut payment_id); },
+ 0x61 => { send_payment(&nodes[1], &nodes[0], chan_a, 10, &mut payment_id); },
+ 0x62 => { send_payment(&nodes[1], &nodes[2], chan_b, 10, &mut payment_id); },
+ 0x63 => { send_payment(&nodes[2], &nodes[1], chan_b, 10, &mut payment_id); },
+ 0x64 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut payment_id); },
+ 0x65 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut payment_id); },
+
+ 0x68 => { send_payment(&nodes[0], &nodes[1], chan_a, 1, &mut payment_id); },
+ 0x69 => { send_payment(&nodes[1], &nodes[0], chan_a, 1, &mut payment_id); },
+ 0x6a => { send_payment(&nodes[1], &nodes[2], chan_b, 1, &mut payment_id); },
+ 0x6b => { send_payment(&nodes[2], &nodes[1], chan_b, 1, &mut payment_id); },
+ 0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
+ 0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },
+
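+ // Fee-update opcodes: 0x80/0x84/0x88 bump a node's Normal feerate by 250 sat/kW, clamped
+ // to FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE times the feerate at the last full HTLC
+ // clear; 0x81/0x85/0x89 reset it to the 253 sat/kW floor.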
+ 0x80 => {
+ let max_feerate = last_htlc_clear_fee_a * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+ if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+ fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release);
+ }
+ nodes[0].maybe_update_chan_fees();
+ },
+ 0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); },
+
+ 0x84 => {
+ let max_feerate = last_htlc_clear_fee_b * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+ if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+ fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release);
+ }
+ nodes[1].maybe_update_chan_fees();
+ },
+ 0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); },
+
+ 0x88 => {
+ let max_feerate = last_htlc_clear_fee_c * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32;
+ if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate {
+ fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release);
+ }
+ nodes[2].maybe_update_chan_fees();
+ },
+ 0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
0xff => {
// Test that no channel is in a stuck state where neither party can send funds even
// after we resolve all pending events.
// First make sure there are no pending monitor updates, resetting the error state
- // and calling channel_monitor_updated for each monitor.
- *monitor_a.update_ret.lock().unwrap() = Ok(());
- *monitor_b.update_ret.lock().unwrap() = Ok(());
- *monitor_c.update_ret.lock().unwrap() = Ok(());
+ // and calling force_channel_monitor_updated for each monitor.
+ *monitor_a.persister.update_ret.lock().unwrap() = Ok(());
+ *monitor_b.persister.update_ret.lock().unwrap() = Ok(());
+ *monitor_c.persister.update_ret.lock().unwrap() = Ok(());
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[0].process_monitor_events();
}
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[1].process_monitor_events();
}
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[1].process_monitor_events();
}
if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[2].process_monitor_events();
}
// Next, make sure peers are all connected to each other
for i in 0..std::usize::MAX {
if i == 100 { panic!("It may take many iterations to settle the state, but it should not take forever"); }
// Then, make sure any current forwards make their way to their destination
- if process_msg_events!(0, false) { continue; }
- if process_msg_events!(1, false) { continue; }
- if process_msg_events!(2, false) { continue; }
+ if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; }
+ if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; }
+ if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; }
// ...making sure any pending PendingHTLCsForwardable events are handled and
// payments claimed.
if process_events!(0, false) { continue; }
assert!(
send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) ||
send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id));
+
+ last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire);
+ last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire);
+ last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire);
},
_ => test_return!(),
}