//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.
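//!
//! For example, a 0x00 byte makes node A's monitor updates start returning a temporary failure,
//! while a 0x14 byte delivers node A's pending messages to its peers (see the match in do_test
//! below for the full byte-to-action mapping).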

use bitcoin::BitcoinHash;
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;

use bitcoin::hashes::Hash as TraitImport;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, WPubkeyHash};

use lightning::chain::chaininterface;
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil,ChainWatchInterface};
use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
use lightning::ln::channelmonitor;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, ChannelManagerReadArgs};
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init};
use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning::routing::router::{Route, RouteHop};

use utils::test_logger;

use bitcoin::secp256k1::key::{PublicKey,SecretKey};
use bitcoin::secp256k1::Secp256k1;

use std::mem;
use std::cmp::Ordering;
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;

struct FuzzEstimator {}
impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
		// Fee estimation doesn't matter for this test; a constant, minimal feerate keeps
		// commitment transactions deterministic.
		253
	}
}

pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transaction(&self, _tx: &Transaction) { }
}
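
// A trivial in-memory Writer: serialized ChannelManager and ChannelMonitor state is captured
// into these byte vectors so it can be deserialized again later (e.g. on a simulated restart).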
pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, size: usize) {
		self.0.reserve_exact(size);
	}
}

struct TestChannelMonitor {
	pub logger: Arc<dyn Logger>,
	pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<dyn ChainWatchInterface>>>,
	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
	// logic will automatically force-close our channels for us (as we don't have an up-to-date
	// monitor implying we are not able to punish misbehaving counterparties). Because this test
	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
	// fully-serialized monitor state here, as well as the corresponding update_id.
	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
	pub should_update_manager: atomic::AtomicBool,
}
impl TestChannelMonitor {
	pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>) -> Self {
		Self {
			simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger.clone(), feeest)),
			logger,
			update_ret: Mutex::new(Ok(())),
			latest_monitors: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),
		}
	}
}
impl channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor {
	fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut ser = VecWriter(Vec::new());
		monitor.write_for_disk(&mut ser).unwrap();
		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
			panic!("Already had monitor pre-add_monitor");
		}
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok());
		self.update_ret.lock().unwrap().clone()
	}

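	// Note that rather than mutating any live monitor, each update below is applied to a freshly
	// deserialized copy of the latest serialized state and the result is re-serialized, so every
	// update also round-trips the ChannelMonitor serialization logic.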
	fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut map_lock = self.latest_monitors.lock().unwrap();
		let mut map_entry = match map_lock.entry(funding_txo) {
			hash_map::Entry::Occupied(entry) => entry,
			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
		};
		let mut deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
			read(&mut Cursor::new(&map_entry.get().1)).unwrap().1;
		deserialized_monitor.update_monitor(update.clone(), &&TestBroadcaster {}, &self.logger).unwrap();
		let mut ser = VecWriter(Vec::new());
		deserialized_monitor.write_for_disk(&mut ser).unwrap();
		map_entry.insert((update.update_id, ser.0));
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		self.update_ret.lock().unwrap().clone()
	}

	fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
		self.simple_monitor.get_and_clear_pending_htlcs_updated()
	}
}

struct KeyProvider {
	node_id: u8,
	session_id: atomic::AtomicU8,
	channel_id: atomic::AtomicU8,
}

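// All keys and ids are derived deterministically from a constant tag byte plus this node's
// node_id (and, where needed, a per-use counter), so a given fuzz input always reproduces the
// same set of keys.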
impl KeysInterface for KeyProvider {
	type ChanKeySigner = EnforcingChannelKeys;

	fn get_node_secret(&self) -> SecretKey {
		SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
	}

	fn get_destination_script(&self) -> Script {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
		let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
	}

	fn get_shutdown_pubkey(&self) -> PublicKey {
		let secp_ctx = Secp256k1::signing_only();
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
	}

	fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingChannelKeys {
		let secp_ctx = Secp256k1::signing_only();
		EnforcingChannelKeys::new(InMemoryChannelKeys::new(
			&secp_ctx,
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
			[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
			channel_value_satoshis,
		))
	}

	fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
		let id = self.session_id.fetch_add(1, atomic::Ordering::Relaxed);
		(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 10, self.node_id]).unwrap(),
		 [0; 32])
	}

	fn get_channel_id(&self) -> [u8; 32] {
		let id = self.channel_id.fetch_add(1, atomic::Ordering::Relaxed);
		[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
	}
}

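// Stand up a three-node network (A - B - C), open channels A<->B and B<->C, then interpret the
// fuzz input byte-by-byte as actions against those nodes.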
pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
	let fee_est = Arc::new(FuzzEstimator{});
	let broadcast = Arc::new(TestBroadcaster{});

	macro_rules! make_node {
		($node_id: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
			(Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()),
			monitor)
		} }
	}

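	// reload_node simulates a node restarting from disk: it deserializes a previously-serialized
	// ChannelManager (plus the latest copy of each ChannelMonitor) into a fresh node instance.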
	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut Cursor::new(&monitor_ser)).expect("Failed to read monitor").1);
				monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
			}
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);
			}

			let read_args = ChannelManagerReadArgs {
				keys_manager,
				fee_estimator: fee_est.clone(),
				monitor: monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				logger,
				default_config: config,
				channel_monitors: &mut monitor_refs,
			};

			(<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor)
		} }
	}

	let mut channel_txn = Vec::new();
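	// Walk two nodes through the full channel-open handshake (open_channel/accept_channel, then
	// funding_created/funding_signed), stash the funding transaction for later confirmation, and
	// return the funding outpoint.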
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.create_channel($dest.get_our_node_id(), 10000000, 42, 0, None).unwrap();
			let open_channel = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$dest.handle_open_channel(&$source.get_our_node_id(), InitFeatures::known(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::known(), &accept_channel);
			let funding_output;
			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
					let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					}] };
					funding_output = OutPoint::new(tx.txid(), 0);
					$source.funding_transaction_generated(&temporary_channel_id, funding_output);
					channel_txn.push(tx);
				} else { panic!("Wrong event type"); }
			}

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);

			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingBroadcastSafe { .. } = events[0] {
				} else { panic!("Wrong event type"); }
			}

			funding_output
		} }
	}

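	// Connect the funding transactions in block 1, then bury them under enough further blocks
	// that both ends consider the channels fully confirmed.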
	macro_rules! confirm_txn {
		($node: expr) => { {
			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			let mut txn = Vec::with_capacity(channel_txn.len());
			let mut posn = Vec::with_capacity(channel_txn.len());
			for i in 0..channel_txn.len() {
				txn.push(&channel_txn[i]);
				posn.push(i as u32 + 1);
			}
			$node.block_connected(&header, 1, &txn, &posn);
			for i in 2..100 {
				header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
				$node.block_connected(&header, i, &Vec::new(), &[0; 0]);
			}
		} }
	}

	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			}
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
							}
						}
					} else { panic!("Wrong event type"); }
				}
			}

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }
				}
			}
		} }
	}

	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	// payments.
	let (mut node_a, mut monitor_a) = make_node!(0);
	let (mut node_b, mut monitor_b) = make_node!(1);
	let (mut node_c, mut monitor_c) = make_node!(2);

	let mut nodes = [node_a, node_b, node_c];

	let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
	let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {
		confirm_txn!(node);
	}

	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();
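
	// These serialized copies are refreshed at the bottom of the fuzz loop; the reload commands
	// below deserialize them again via reload_node to simulate a restart.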

	macro_rules! test_return {
		() => { {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);
			return;
		} }
	}

	let mut read_pos = 0;
	macro_rules! get_slice {
		($len: expr) => { {
			let slice_len = $len as usize;
			if data.len() < read_pos + slice_len {
				test_return!();
			}
			read_pos += slice_len;
			&data[read_pos - slice_len..read_pos]
		} }
	}

	macro_rules! send_payment {
		($source: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			if let Err(_) = $source.send_payment(&Route {
				paths: vec![vec![RouteHop {
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					// The final-hop fee_msat is the amount delivered, matching the claim_funds
					// amount in process_events below.
					fee_msat: 5000000,
					cltv_expiry_delta: 200,
				}]],
			}, PaymentHash(payment_hash.into_inner()), &None) {
				// Probably ran out of funds
				test_return!();
			}
		} };
		($source: expr, $middle: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			if let Err(_) = $source.send_payment(&Route {
				paths: vec![vec![RouteHop {
					pubkey: $middle.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $middle.1,
					channel_features: ChannelFeatures::empty(),
					fee_msat: 50000,
					cltv_expiry_delta: 100,
				},RouteHop {
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					fee_msat: 5000000,
					cltv_expiry_delta: 200,
				}]],
			}, PaymentHash(payment_hash.into_inner()), &None) {
				// Probably ran out of funds
				test_return!();
			}
		} }
	}

	macro_rules! send_payment_with_secret {
		($source: expr, $middle: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			let payment_secret = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			// Two identical paths through $middle form a multi-path payment: both HTLCs share the
			// payment_hash and payment_secret, and each path carries half the 5_000_000 msat
			// total so the receiver's claim amount still matches.
			if let Err(_) = $source.send_payment(&Route {
				paths: vec![vec![RouteHop {
					pubkey: $middle.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $middle.1,
					channel_features: ChannelFeatures::empty(),
					fee_msat: 50000,
					cltv_expiry_delta: 100,
				},RouteHop {
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					fee_msat: 2500000,
					cltv_expiry_delta: 200,
				}],vec![RouteHop {
					pubkey: $middle.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $middle.1,
					channel_features: ChannelFeatures::empty(),
					fee_msat: 50000,
					cltv_expiry_delta: 100,
				},RouteHop {
					pubkey: $dest.0.get_our_node_id(),
					node_features: NodeFeatures::empty(),
					short_channel_id: $dest.1,
					channel_features: ChannelFeatures::empty(),
					fee_msat: 2500000,
					cltv_expiry_delta: 200,
				}]],
			}, PaymentHash(payment_hash.into_inner()), &Some(PaymentSecret(payment_secret.into_inner()))) {
				// Probably ran out of funds
				test_return!();
			}
		} }
	}

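	// Deliver every queued message event from $node to its peers. Node B's messages to a peer
	// that disconnected are stashed in ba_events/bc_events (see drain_msg_events_on_disconnect)
	// and replayed here when $node is 1. With $corrupt_forward set, forwarded update_add_htlc
	// messages get a byte flipped so the recipient generates update_fail_malformed_htlc.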
	macro_rules! process_msg_events {
		($node: expr, $corrupt_forward: expr) => { {
			let events = if $node == 1 {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut ba_events);
				new_events.extend_from_slice(&bc_events[..]);
				bc_events.clear();
				new_events
			} else { Vec::new() };
			let ev = nodes[$node].get_and_clear_pending_msg_events();
			for event in events.iter().chain(ev.iter()) {
				match event {
					events::MessageSendEvent::UpdateHTLCs { updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => {
						println!("UPDATEHTLCs {} {} {} {}", update_add_htlcs.len(), update_fail_htlcs.len(), update_fulfill_htlcs.len(), update_fail_malformed_htlcs.len());
					},
					events::MessageSendEvent::SendRevokeAndACK { .. } => {
						println!("RAA");
					},
					events::MessageSendEvent::SendChannelReestablish { .. } => {
						println!("Chan REE");
					},
					events::MessageSendEvent::SendFundingLocked { .. } => {
						println!("Funding locked");
					},
					events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
						println!("Fail net update");
					},
					_ => panic!("Unhandled message event"),
				}
			}
			for event in events.iter().chain(ev.iter()) {
				match event {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								assert!(update_fee.is_none());
								for update_add in update_add_htlcs {
									if !$corrupt_forward {
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add);
									} else {
										// Corrupt the update_add_htlc message so that its HMAC
										// check will fail and we generate a
										// update_fail_malformed_htlc instead of an
										// update_fail_htlc as we do when we reject a payment.
										let mut msg_ser = update_add.encode();
										msg_ser[1000] ^= 0xff;
										let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
									}
								}
								for update_fulfill in update_fulfill_htlcs {
									dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill);
								}
								for update_fail in update_fail_htlcs {
									dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail);
								}
								for update_fail_malformed in update_fail_malformed_htlcs {
									dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed);
								}
								dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
							}
						}
					},
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
							}
						}
					},
					events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
							}
						}
					},
					events::MessageSendEvent::SendFundingLocked { .. } => {
						// Can be generated as a reestablish response
					},
					events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
						// Can be generated due to a payment forward being rejected due to a
						// channel having previously failed a monitor update
					},
					events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {
						// Can be generated at any processing step to send back an error, disconnect
						// peer or just ignore
					},
					_ => panic!("Unhandled message event"),
				}
			}
		} }
	}

	macro_rules! drain_msg_events_on_disconnect {
		($counterparty_id: expr) => { {
			if $counterparty_id == 0 {
				for event in nodes[0].get_and_clear_pending_msg_events() {
					match event {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendFundingLocked { .. } => {},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
						_ => panic!("Unhandled message event"),
					}
				}
				ba_events.clear();
			} else {
				for event in nodes[2].get_and_clear_pending_msg_events() {
					match event {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendFundingLocked { .. } => {},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
						_ => panic!("Unhandled message event"),
					}
				}
				bc_events.clear();
			}
			let mut events = nodes[1].get_and_clear_pending_msg_events();
			let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
			let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
			for event in events.drain(..) {
				let push = match event {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => *node_id != drop_node_id,
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => *node_id != drop_node_id,
					events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => *node_id != drop_node_id,
					events::MessageSendEvent::SendFundingLocked { .. } => false,
					events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
					events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
					_ => panic!("Unhandled message event"),
				};
				if push { msg_sink.push(event); }
			}
		} }
	}

	macro_rules! process_events {
		($node: expr, $fail: expr) => { {
			// In case we get 256 payments we may have a hash collision, resulting in the
			// second claim/fail call not finding the duplicate-hash HTLC, so we have to
			// deduplicate the calls here.
			let mut claim_set = HashSet::new();
			let mut events = nodes[$node].get_and_clear_pending_events();
			// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
			// case where we first process a PendingHTLCsForwardable, then claim/fail on a
			// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
			// PaymentReceived event for the second HTLC in our pending_events (and breaking
			// our claim_set deduplication).
			events.sort_by(|a, b| {
				if let events::Event::PaymentReceived { .. } = a {
					if let events::Event::PendingHTLCsForwardable { .. } = b {
						Ordering::Less
					} else { Ordering::Equal }
				} else if let events::Event::PendingHTLCsForwardable { .. } = a {
					if let events::Event::PaymentReceived { .. } = b {
						Ordering::Greater
					} else { Ordering::Equal }
				} else { Ordering::Equal }
			});
			for event in events.drain(..) {
				match event {
					events::Event::PaymentReceived { payment_hash, payment_secret, .. } => {
						if claim_set.insert(payment_hash.0) {
							if $fail {
								assert!(nodes[$node].fail_htlc_backwards(&payment_hash, &payment_secret));
							} else {
								assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0), &payment_secret, 5_000_000));
							}
						}
					},
					events::Event::PaymentSent { .. } => {},
					events::Event::PaymentFailed { .. } => {},
					events::Event::PendingHTLCsForwardable { .. } => {
						nodes[$node].process_pending_htlc_forwards();
					},
					_ => panic!("Unhandled event"),
				}
			}
		} }
	}

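	// The main fuzz loop: each input byte selects one action. When the input runs out,
	// get_slice! invokes test_return!, which asserts that every channel is still open.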
	loop {
		let a = get_slice!(1)[0];
		println!("PROCESSING {:x}", a);
		match a {
			0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
			0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
			0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
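			// Let a previously-failed monitor update complete: if we have a latest monitor for
			// the channel, tell the ChannelManager the monitor update has been restored.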
			0x06 => {
				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[0].channel_monitor_updated(&chan_1_funding, *id);
				}
			},
			0x07 => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[1].channel_monitor_updated(&chan_1_funding, *id);
				}
			},
			// 0x24 slots in here so the monitor_b restore arms sit together; see the note at the
			// end of the match.
			0x24 => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[1].channel_monitor_updated(&chan_2_funding, *id);
				}
			},
			0x08 => {
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[2].channel_monitor_updated(&chan_2_funding, *id);
				}
			},
			0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
			0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
			0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
			0x0c => send_payment!(nodes[2], (&nodes[1], chan_b)),
			0x0d => send_payment!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
			0x0e => send_payment!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
			0x0f => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
			},
			0x10 => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
			},
			0x11 => {
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_a_disconnected = false;
				}
			},
			0x12 => {
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_b_disconnected = false;
				}
			},
			0x13 => process_msg_events!(0, true),
			0x14 => process_msg_events!(0, false),
			0x15 => process_events!(0, true),
			0x16 => process_events!(0, false),
			0x17 => process_msg_events!(1, true),
			0x18 => process_msg_events!(1, false),
			0x19 => process_events!(1, true),
			0x1a => process_events!(1, false),
			0x1b => process_msg_events!(2, true),
			0x1c => process_msg_events!(2, false),
			0x1d => process_events!(2, true),
			0x1e => process_events!(2, false),
			0x1f => {
				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a);
				node_a = Arc::new(new_node_a);
				nodes[0] = node_a.clone();
				monitor_a = new_monitor_a;
			},
			0x20 => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();
					ba_events.clear();
				}
				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();
					bc_events.clear();
				}
				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b);
				node_b = Arc::new(new_node_b);
				nodes[1] = node_b.clone();
				monitor_b = new_monitor_b;
			},
			0x21 => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c);
				node_c = Arc::new(new_node_c);
				nodes[2] = node_c.clone();
				monitor_c = new_monitor_c;
			},
			0x22 => send_payment_with_secret!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
			0x23 => send_payment_with_secret!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
			// 0x24 defined above
			_ => test_return!(),
		}

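		// Re-serialize the node state after each action so a subsequent reload command restarts
		// the node from its most recent state rather than a stale one.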
		node_a_ser.0.clear();
		nodes[0].write(&mut node_a_ser).unwrap();
		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_b_ser.0.clear();
		nodes[1].write(&mut node_b_ser).unwrap();
		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_c_ser.0.clear();
		nodes[2].write(&mut node_c_ser).unwrap();
		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
	}
}

pub fn chanmon_consistency_test<Out: test_logger::Output>(data: &[u8], out: Out) {
	do_test(data, out);
}

#[no_mangle]
pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{});
}