//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves and, assuming at least one of our receive- or
//! send-side handlers is correct, with other peers. We consider it a failure if any action results
//! in a channel being force-closed.
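//!
//! Each action is selected by a single byte of fuzz input. As a rough, illustrative sketch of the
//! dispatch in `do_test` below (not the full action set):
//!
//! ```ignore
//! match get_slice!(1)[0] {
//! 	// Make node A's monitor updates start failing:
//! 	0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
//! 	// Send a payment from node A to node B over channel A:
//! 	0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
//! 	// Deliver node A's queued messages to their destinations:
//! 	0x14 => process_msg_events!(0, false),
//! 	// ...plus disconnects, reconnects, event processing and full node reloads.
//! 	_ => return,
//! }
//! ```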
use bitcoin::BitcoinHash;
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;

use bitcoin::hashes::Hash as TraitImport;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, WPubkeyHash};

use lightning::chain::chaininterface;
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil,ChainWatchInterface};
use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
use lightning::ln::channelmonitor;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, ChannelManagerReadArgs};
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init};
use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning::routing::router::{Route, RouteHop};

use utils::test_logger;

use bitcoin::secp256k1::key::{PublicKey,SecretKey};
use bitcoin::secp256k1::Secp256k1;

use std::mem;
use std::cmp::Ordering;
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;
struct FuzzEstimator {}
impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {

pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transaction(&self, _tx: &Transaction) { }
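
// A minimal in-memory `Writer`: everything written to it is appended to the inner `Vec<u8>`.
// The test uses it to capture full serializations of `ChannelMonitor`s and `ChannelManager`s so
// that nodes can later be reloaded from those bytes.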
pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);

	fn size_hint(&mut self, size: usize) {
		self.0.reserve_exact(size);
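
// Wraps the real `SimpleManyChannelMonitor` so the fuzzer can control what monitor updates
// return: `update_ret` is flipped between `Ok(())` and `Err(TemporaryFailure)` by fuzz-input
// bytes in `do_test`, simulating a persistence backend that temporarily fails.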
struct TestChannelMonitor {
	pub logger: Arc<dyn Logger>,
	pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<dyn ChainWatchInterface>>>,
	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
	// logic will automatically force-close our channels for us (as we don't have an up-to-date
	// monitor, implying we are not able to punish misbehaving counterparties). Because this test
	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
	// fully-serialized monitor state here, as well as the corresponding update_id.
	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
	pub should_update_manager: atomic::AtomicBool,
impl TestChannelMonitor {
	pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>) -> Self {
			simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger.clone(), feeest)),
			update_ret: Mutex::new(Ok(())),
			latest_monitors: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),

impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
	type Keys = EnforcingChannelKeys;

	fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut ser = VecWriter(Vec::new());
		monitor.write_for_disk(&mut ser).unwrap();
		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
			panic!("Already had monitor pre-add_monitor");
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok());
		self.update_ret.lock().unwrap().clone()

	fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut map_lock = self.latest_monitors.lock().unwrap();
		let mut map_entry = match map_lock.entry(funding_txo) {
			hash_map::Entry::Occupied(entry) => entry,
			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
		let mut deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
			read(&mut Cursor::new(&map_entry.get().1)).unwrap().1;
		deserialized_monitor.update_monitor(update.clone(), &&TestBroadcaster {}, &self.logger).unwrap();
		let mut ser = VecWriter(Vec::new());
		deserialized_monitor.write_for_disk(&mut ser).unwrap();
		map_entry.insert((update.update_id, ser.0));
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		self.update_ret.lock().unwrap().clone()

	fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
		return self.simple_monitor.get_and_clear_pending_htlcs_updated();

	session_id: atomic::AtomicU8,
	channel_id: atomic::AtomicU8,
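
// Keys are derived deterministically from the node's `node_id` (plus simple counters for
// per-session and per-channel values), so a given fuzz input always produces the same keys.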
impl KeysInterface for KeyProvider {
	type ChanKeySigner = EnforcingChannelKeys;

	fn get_node_secret(&self) -> SecretKey {
		SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()

	fn get_destination_script(&self) -> Script {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
		let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()

	fn get_shutdown_pubkey(&self) -> PublicKey {
		let secp_ctx = Secp256k1::signing_only();
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())

	fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingChannelKeys {
		let secp_ctx = Secp256k1::signing_only();
		EnforcingChannelKeys::new(InMemoryChannelKeys::new(
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
			[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
			channel_value_satoshis,

	fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
		let id = self.session_id.fetch_add(1, atomic::Ordering::Relaxed);
		(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 10, self.node_id]).unwrap(),

	fn get_channel_id(&self) -> [u8; 32] {
		let id = self.channel_id.fetch_add(1, atomic::Ordering::Relaxed);
		[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
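
// The test proper: interpret `data` (the fuzz input) as a sequence of actions. We stand up three
// nodes connected A <-> B <-> C, open and confirm the two channels, then apply one action per
// input byte, asserting (via test_return!) that every channel is still open when the input ends.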
pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
	let fee_est = Arc::new(FuzzEstimator{});
	let broadcast = Arc::new(TestBroadcaster{});

	macro_rules! make_node {
		($node_id: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
			(Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0)),

	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut Cursor::new(&monitor_ser)).expect("Failed to read monitor").1);
				monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);

			let read_args = ChannelManagerReadArgs {
				fee_estimator: fee_est.clone(),
				monitor: monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				default_config: config,
				channel_monitors: &mut monitor_refs,

			(<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor)

	let mut channel_txn = Vec::new();
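
	// Opens a channel from $source to $dest by driving the funding handshake by hand
	// (open_channel, accept_channel, FundingGenerationReady, funding_created, funding_signed),
	// recording the funding transaction in `channel_txn` so it can be confirmed later.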
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.create_channel($dest.get_our_node_id(), 10000000, 42, 0, None).unwrap();

			let events = $source.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
			} else { panic!("Wrong event type"); }

			$dest.handle_open_channel(&$source.get_our_node_id(), InitFeatures::known(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
				} else { panic!("Wrong event type"); }

			$source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::known(), &accept_channel);

			let events = $source.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
				let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
					value: *channel_value_satoshis, script_pubkey: output_script.clone(),
				funding_output = OutPoint { txid: tx.txid(), index: 0 };
				$source.funding_transaction_generated(&temporary_channel_id, funding_output);
				channel_txn.push(tx);
			} else { panic!("Wrong event type"); }

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
				} else { panic!("Wrong event type"); }

			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
				} else { panic!("Wrong event type"); }

			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);

			let events = $source.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			if let events::Event::FundingBroadcastSafe { .. } = events[0] {
			} else { panic!("Wrong event type"); }
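
	// Mines the funding transactions and then connects a run of empty blocks so both channels
	// have enough confirmations to be locked in.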
	macro_rules! confirm_txn {
			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			let mut txn = Vec::with_capacity(channel_txn.len());
			let mut posn = Vec::with_capacity(channel_txn.len());
			for i in 0..channel_txn.len() {
				txn.push(&channel_txn[i]);
				posn.push(i as u32 + 1);
			$node.block_connected(&header, 1, &txn, &posn);
			header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			$node.block_connected(&header, i, &Vec::new(), &[0; 0]);
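
	// Exchanges funding_locked between the nodes once the funding transactions are confirmed and
	// clears the resulting announcement_signatures messages (channel announcements are not
	// exercised by this test).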
	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
					} else { panic!("Wrong event type"); }

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }

	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	let (mut node_a, mut monitor_a) = make_node!(0);
	let (mut node_b, mut monitor_b) = make_node!(1);
	let (mut node_c, mut monitor_c) = make_node!(2);

	let mut nodes = [node_a, node_b, node_c];

	let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
	let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {

	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();

	macro_rules! test_return {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);

	let mut read_pos = 0;
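
	// Pulls the next $len bytes off the fuzz input; when the input is exhausted the run ends via
	// test_return!, which asserts that every channel is still open.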
	macro_rules! get_slice {
			let slice_len = $len as usize;
			if data.len() < read_pos + slice_len {
			read_pos += slice_len;
			&data[read_pos - slice_len..read_pos]
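
	// Sends a payment along a hard-coded one-hop ($source -> $dest) or two-hop
	// ($source -> $middle -> $dest) route. Routes are built by hand because the fuzzer drives
	// the channels directly rather than going through the router.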
	macro_rules! send_payment {
		($source: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			if let Err(_) = $source.send_payment(&Route {
				paths: vec![vec![RouteHop {
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					cltv_expiry_delta: 200,
			}, PaymentHash(payment_hash.into_inner()), &None) {
				// Probably ran out of funds

		($source: expr, $middle: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			if let Err(_) = $source.send_payment(&Route {
				paths: vec![vec![RouteHop {
					pubkey: $middle.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $middle.1,
					channel_features: ChannelFeatures::empty(),
					cltv_expiry_delta: 100,
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					cltv_expiry_delta: 200,
			}, PaymentHash(payment_hash.into_inner()), &None) {
				// Probably ran out of funds
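
	// Like send_payment!, but pays with a payment_secret over two parallel two-hop paths through
	// the same channels, exercising the multi-path (partial-payment) handling.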
	macro_rules! send_payment_with_secret {
		($source: expr, $middle: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			let payment_secret = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			if let Err(_) = $source.send_payment(&Route {
				paths: vec![vec![RouteHop {
					pubkey: $middle.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $middle.1,
					channel_features: ChannelFeatures::empty(),
					cltv_expiry_delta: 100,
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					cltv_expiry_delta: 200,
					pubkey: $middle.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $middle.1,
					channel_features: ChannelFeatures::empty(),
					cltv_expiry_delta: 100,
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					cltv_expiry_delta: 200,
			}, PaymentHash(payment_hash.into_inner()), &Some(PaymentSecret(payment_secret.into_inner()))) {
				// Probably ran out of funds
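
	// Delivers every message event queued by node $node to the node it is addressed to,
	// optionally corrupting forwarded update_add_htlc messages ($corrupt_forward) so the
	// recipient responds with update_fail_malformed_htlc instead of update_fail_htlc. For node B
	// (index 1), messages buffered while a peer was disconnected (ba_events/bc_events) are
	// delivered first.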
	macro_rules! process_msg_events {
		($node: expr, $corrupt_forward: expr) => { {
			let events = if $node == 1 {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut ba_events);
				new_events.extend_from_slice(&bc_events[..]);
			} else { Vec::new() };
			for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								assert!(update_fee.is_none());
								for update_add in update_add_htlcs {
									if !$corrupt_forward {
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add);
										// Corrupt the update_add_htlc message so that its HMAC
										// check will fail and we generate an
										// update_fail_malformed_htlc instead of an
										// update_fail_htlc as we do when we reject a payment.
										let mut msg_ser = update_add.encode();
										msg_ser[1000] ^= 0xff;
										let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
								for update_fulfill in update_fulfill_htlcs {
									dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill);
								for update_fail in update_fail_htlcs {
									dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail);
								for update_fail_malformed in update_fail_malformed_htlcs {
									dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed);
								dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
					events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
					events::MessageSendEvent::SendFundingLocked { .. } => {
						// Can be generated as a reestablish response
					events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
						// Can be generated when a payment forward is rejected because a channel
						// previously failed a monitor update
					events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {
						// Can be generated at any processing step to send back an error,
						// disconnect the peer, or just ignore it
					_ => panic!("Unhandled message event"),
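
	// Simulates the message loss that comes with a disconnect: node B's messages addressed to the
	// disconnected peer are dropped, its messages to the still-connected peer are stashed in
	// ba_events/bc_events for delivery after the fuzzer reconnects them, and the disconnected
	// counterparty's own queued messages are simply cleared.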
	macro_rules! drain_msg_events_on_disconnect {
		($counterparty_id: expr) => { {
			if $counterparty_id == 0 {
				for event in nodes[0].get_and_clear_pending_msg_events() {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendFundingLocked { .. } => {},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
						_ => panic!("Unhandled message event"),

				for event in nodes[2].get_and_clear_pending_msg_events() {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendFundingLocked { .. } => {},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
						_ => panic!("Unhandled message event"),

			let mut events = nodes[1].get_and_clear_pending_msg_events();
			let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
			let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
			for event in events.drain(..) {
				let push = match event {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
						if *node_id != drop_node_id { true } else { false }
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
						if *node_id != drop_node_id { true } else { false }
					events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
						if *node_id != drop_node_id { true } else { false }
					events::MessageSendEvent::SendFundingLocked { .. } => false,
					events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
					events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
					_ => panic!("Unhandled message event"),
				if push { msg_sink.push(event); }

	macro_rules! process_events {
		($node: expr, $fail: expr) => { {
			// In case we get 256 payments we may have a hash collision, resulting in the
			// second claim/fail call not finding the duplicate-hash HTLC, so we have to
			// deduplicate the calls here.
			let mut claim_set = HashSet::new();
			let mut events = nodes[$node].get_and_clear_pending_events();
			// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
			// case where we first process a PendingHTLCsForwardable, then claim/fail on a
			// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
			// PaymentReceived event for the second HTLC in our pending_events (and breaking
			// our claim_set deduplication).
			events.sort_by(|a, b| {
				if let events::Event::PaymentReceived { .. } = a {
					if let events::Event::PendingHTLCsForwardable { .. } = b {
					} else { Ordering::Equal }
				} else if let events::Event::PendingHTLCsForwardable { .. } = a {
					if let events::Event::PaymentReceived { .. } = b {
					} else { Ordering::Equal }
				} else { Ordering::Equal }
			for event in events.drain(..) {
					events::Event::PaymentReceived { payment_hash, payment_secret, .. } => {
						if claim_set.insert(payment_hash.0) {
							assert!(nodes[$node].fail_htlc_backwards(&payment_hash, &payment_secret));
							assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0), &payment_secret, 5_000_000));
					events::Event::PaymentSent { .. } => {},
					events::Event::PaymentFailed { .. } => {},
					events::Event::PendingHTLCsForwardable { .. } => {
						nodes[$node].process_pending_htlc_forwards();
					_ => panic!("Unhandled event"),
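
	// The interpreter loop: each fuzz-input byte selects one action, such as toggling a node's
	// monitor update result, notifying a node that a pending monitor update completed, sending
	// payments, disconnecting or reconnecting peers, delivering messages, processing events, or
	// reloading a node from its last serialized state.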
		match get_slice!(1)[0] {
			0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
			0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
			0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),

				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[0].channel_monitor_updated(&chan_1_funding, *id);

				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[1].channel_monitor_updated(&chan_1_funding, *id);

				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[1].channel_monitor_updated(&chan_2_funding, *id);

				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[2].channel_monitor_updated(&chan_2_funding, *id);

			0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
			0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
			0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
			0x0c => send_payment!(nodes[2], (&nodes[1], chan_b)),
			0x0d => send_payment!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
			0x0e => send_payment!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),

				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);

				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);

				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_a_disconnected = false;

				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_b_disconnected = false;

			0x13 => process_msg_events!(0, true),
			0x14 => process_msg_events!(0, false),
			0x15 => process_events!(0, true),
			0x16 => process_events!(0, false),
			0x17 => process_msg_events!(1, true),
			0x18 => process_msg_events!(1, false),
			0x19 => process_events!(1, true),
			0x1a => process_events!(1, false),
			0x1b => process_msg_events!(2, true),
			0x1c => process_msg_events!(2, false),
			0x1d => process_events!(2, true),
			0x1e => process_events!(2, false),

				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a);
				node_a = Arc::new(new_node_a);
				nodes[0] = node_a.clone();
				monitor_a = new_monitor_a;

				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();

				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();

				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b);
				node_b = Arc::new(new_node_b);
				nodes[1] = node_b.clone();
				monitor_b = new_monitor_b;

				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c);
				node_c = Arc::new(new_node_c);
				nodes[2] = node_c.clone();
				monitor_c = new_monitor_c;

			0x22 => send_payment_with_secret!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
			0x23 => send_payment_with_secret!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
			// 0x24 defined above
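
		// Keep the serialized copies of the nodes up to date (should_update_manager tracks whether
		// a node's monitor state changed) so the reload actions above restore a node from its most
		// recent state rather than an arbitrarily stale one.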
		node_a_ser.0.clear();
		nodes[0].write(&mut node_a_ser).unwrap();
		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_b_ser.0.clear();
		nodes[1].write(&mut node_b_ser).unwrap();
		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_c_ser.0.clear();
		nodes[2].write(&mut node_c_ser).unwrap();
		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);

pub fn chanmon_consistency_test<Out: test_logger::Output>(data: &[u8], out: Out) {

pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{});