//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.

use bitcoin::BitcoinHash;
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;

use bitcoin_hashes::Hash as TraitImport;
use bitcoin_hashes::hash160::Hash as Hash160;
use bitcoin_hashes::sha256::Hash as Sha256;
use bitcoin_hashes::sha256d::Hash as Sha256d;

use lightning::chain::chaininterface;
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil};
use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
use lightning::ln::channelmonitor;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, ChannelManagerReadArgs};
use lightning::ln::router::{Route, RouteHop};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Features, FeatureContextInit};
use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};

use utils::test_logger;

use secp256k1::key::{PublicKey,SecretKey};
use secp256k1::Secp256k1;

use std::mem;
use std::cmp::Ordering;
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;

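// A fee estimator which always returns the same constant rate, keeping fee-dependent behavior
// deterministic across fuzz runs.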
struct FuzzEstimator {}
impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
		253
	}
}

// Broadcast transactions are simply dropped; nothing hits a real chain during fuzzing.
pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transaction(&self, _tx: &Transaction) { }
}

pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, size: usize) {
		self.0.reserve_exact(size);
	}
}

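// Set only while test_restore_channel_monitor is running, so add_update_monitor can tell a
// retried update (which may now be considered good) from one that may still be awaiting a restore.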
static mut IN_RESTORE: bool = false;
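// Wraps a SimpleManyChannelMonitor, forcing update results to whatever is currently in update_ret
// and tracking, per channel, the last successfully-persisted monitor serialization and whether the
// most recent update attempt succeeded.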
pub struct TestChannelMonitor {
	pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
	pub latest_good_update: Mutex<HashMap<OutPoint, Vec<u8>>>,
	pub latest_update_good: Mutex<HashMap<OutPoint, bool>>,
	pub latest_updates_good_at_last_ser: Mutex<HashMap<OutPoint, bool>>,
	pub should_update_manager: atomic::AtomicBool,
}
impl TestChannelMonitor {
	pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<dyn chaininterface::BroadcasterInterface>, logger: Arc<dyn Logger>, feeest: Arc<dyn chaininterface::FeeEstimator>) -> Self {
		Self {
			simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest),
			update_ret: Mutex::new(Ok(())),
			latest_good_update: Mutex::new(HashMap::new()),
			latest_update_good: Mutex::new(HashMap::new()),
			latest_updates_good_at_last_ser: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),
		}
	}
}
impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
	fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let ret = self.update_ret.lock().unwrap().clone();
		if let Ok(()) = ret {
			let mut ser = VecWriter(Vec::new());
			monitor.write_for_disk(&mut ser).unwrap();
			self.latest_good_update.lock().unwrap().insert(funding_txo, ser.0);
			match self.latest_update_good.lock().unwrap().entry(funding_txo) {
				hash_map::Entry::Vacant(e) => { e.insert(true); },
				hash_map::Entry::Occupied(mut e) => {
					if !e.get() && unsafe { IN_RESTORE } {
						// Technically we can't consider an update to be "good" unless we're doing
						// it in response to a test_restore_channel_monitor as the channel may
						// still be waiting on such a call, so only set us to good if we're in the
						// middle of a restore call.
						e.insert(true);
					}
				},
			}
			self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		} else {
			self.latest_update_good.lock().unwrap().insert(funding_txo, false);
		}
		assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
		ret
	}

	fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
		self.simple_monitor.fetch_pending_htlc_updated()
	}
}

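// Deterministic key provider: every secret is a fixed byte pattern ending in node_id, so a given
// node's keys are identical across reloads of that node.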
struct KeyProvider {
	node_id: u8,
	session_id: atomic::AtomicU8,
	channel_id: atomic::AtomicU8,
}
impl KeysInterface for KeyProvider {
	type ChanKeySigner = EnforcingChannelKeys;

	fn get_node_secret(&self) -> SecretKey {
		SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
	}

	fn get_destination_script(&self) -> Script {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
		let our_channel_monitor_claim_key_hash = Hash160::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
	}

	fn get_shutdown_pubkey(&self) -> PublicKey {
		let secp_ctx = Secp256k1::signing_only();
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
	}

	fn get_channel_keys(&self, _inbound: bool) -> EnforcingChannelKeys {
		EnforcingChannelKeys::new(InMemoryChannelKeys {
			funding_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
			revocation_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
			payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
			delayed_payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
			htlc_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
			commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
		})
	}

	fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
		let id = self.session_id.fetch_add(1, atomic::Ordering::Relaxed);
		(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 10, self.node_id]).unwrap(),
		[0; 32])
	}

	fn get_channel_id(&self) -> [u8; 32] {
		let id = self.channel_id.fetch_add(1, atomic::Ordering::Relaxed);
		[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
	}
}

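// The fuzz test itself: interprets the input bytes as a sequence of actions against a three-node
// network, as described in the module documentation above.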
pub fn do_test(data: &[u8]) {
	let fee_est = Arc::new(FuzzEstimator{});
	let broadcast = Arc::new(TestBroadcaster{});

	macro_rules! make_node {
		($node_id: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
			(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap(),
			monitor)
		} }
	}

	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_good_update.lock().unwrap();
			for (outpoint, monitor_ser) in old_monitors.drain() {
				monitors.insert(outpoint, <(Sha256d, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser), Arc::clone(&logger)).expect("Failed to read monitor").1);
				monitor.latest_good_update.lock().unwrap().insert(outpoint, monitor_ser);
			}
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);
			}

			let read_args = ChannelManagerReadArgs {
				keys_manager,
				fee_estimator: fee_est.clone(),
				monitor: monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				logger,
				default_config: config,
				channel_monitors: &mut monitor_refs,
			};

			let res = (<(Sha256d, ChannelManager<EnforcingChannelKeys>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor);
			for (_, was_good) in $old_monitors.latest_updates_good_at_last_ser.lock().unwrap().iter() {
				if !*was_good {
					// If the last time we updated a monitor we didn't successfully update (and we
					// have since updated our serialized copy of the ChannelManager) we may
					// force-close the channel on our counterparty because we know we're missing
					// something. Thus, we just return here since we can't continue to test.
					return;
				}
			}
			res
		} }
	}

	let mut channel_txn = Vec::new();
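	// Opens a channel from $source to $dest, driving the open_channel/accept_channel/
	// funding_created/funding_signed handshake by hand and stashing the funding transaction in
	// channel_txn for later confirmation.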
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.create_channel($dest.get_our_node_id(), 10000000, 42, 0).unwrap();
			let open_channel = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$dest.handle_open_channel(&$source.get_our_node_id(), Features::<FeatureContextInit>::supported(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$source.handle_accept_channel(&$dest.get_our_node_id(), Features::<FeatureContextInit>::supported(), &accept_channel);
			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
					let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					let funding_output = OutPoint::new(tx.txid(), 0);
					$source.funding_transaction_generated(&temporary_channel_id, funding_output);
					channel_txn.push(tx);
				} else { panic!("Wrong event type"); }
			}

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);

			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingBroadcastSafe { .. } = events[0] {
				} else { panic!("Wrong event type"); }
			}
		} }
	}

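	// Connects a block containing every funding transaction, followed by enough further blocks
	// for the channels to be usable.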
	macro_rules! confirm_txn {
		($node: expr) => { {
			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			let mut txn = Vec::with_capacity(channel_txn.len());
			let mut posn = Vec::with_capacity(channel_txn.len());
			for i in 0..channel_txn.len() {
				txn.push(&channel_txn[i]);
				posn.push(i as u32 + 1);
			}
			$node.block_connected(&header, 1, &txn, &posn);
			for i in 2..100 {
				header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
				$node.block_connected(&header, i, &Vec::new(), &[0; 0]);
			}
		} }
	}

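	// Delivers each node's funding_locked messages to its peers and swallows the resulting
	// announcement_signatures.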
	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			}
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
							}
						}
					} else { panic!("Wrong event type"); }
				}
			}

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }
				}
			}
		} }
	}

	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	let (mut node_a, mut monitor_a) = make_node!(0);
	let (mut node_b, mut monitor_b) = make_node!(1);
	let (mut node_c, mut monitor_c) = make_node!(2);

	let mut nodes = [node_a, node_b, node_c];

	make_channel!(nodes[0], nodes[1], 0);
	make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {
		confirm_txn!(node);
	}

	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();

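	// Ends the test. All channels must still be open: a force-close anywhere is a failure.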
	macro_rules! test_return {
		() => { {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);
			return;
		} }
	}

	let mut read_pos = 0;
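	// Pulls $len bytes from the fuzz input, ending the test cleanly if the input is exhausted.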
	macro_rules! get_slice {
		($len: expr) => { {
			let slice_len = $len as usize;
			if data.len() < read_pos + slice_len {
				test_return!();
			}
			read_pos += slice_len;
			&data[read_pos - slice_len..read_pos]
		} }
	}

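	// Sends a payment over a fixed one-hop or two-hop route; an immediate send error just means
	// we (probably) ran out of funds, which isn't a test failure.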
	macro_rules! send_payment {
		($source: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			if let Err(_) = $source.send_payment(Route {
				hops: vec![RouteHop {
					pubkey: $dest.0.get_our_node_id(),
					short_channel_id: $dest.1,
					fee_msat: 5_000_000,
					cltv_expiry_delta: 200,
				}],
			}, PaymentHash(payment_hash.into_inner())) {
				// Probably ran out of funds
				test_return!();
			}
		} };
		($source: expr, $middle: expr, $dest: expr) => { {
			let payment_hash = Sha256::hash(&[payment_id; 1]);
			payment_id = payment_id.wrapping_add(1);
			if let Err(_) = $source.send_payment(Route {
				hops: vec![RouteHop {
					pubkey: $middle.0.get_our_node_id(),
					short_channel_id: $middle.1,
					fee_msat: 50_000,
					cltv_expiry_delta: 100,
				},RouteHop {
					pubkey: $dest.0.get_our_node_id(),
					short_channel_id: $dest.1,
					fee_msat: 5_000_000,
					cltv_expiry_delta: 200,
				}],
			}, PaymentHash(payment_hash.into_inner())) {
				// Probably ran out of funds
				test_return!();
			}
		} }
	}

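	// Delivers every queued message event from $node to its destination node, optionally
	// corrupting forwarded update_add_htlc messages so their HMAC check fails.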
	macro_rules! process_msg_events {
		($node: expr, $corrupt_forward: expr) => { {
			let events = if $node == 1 {
				let mut new_events = Vec::new();
				mem::swap(&mut new_events, &mut ba_events);
				new_events.extend_from_slice(&bc_events[..]);
				bc_events.clear();
				new_events
			} else { Vec::new() };
			for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) {
				match event {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								assert!(update_fee.is_none());
								for update_add in update_add_htlcs {
									if !$corrupt_forward {
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add);
									} else {
										// Corrupt the update_add_htlc message so that its HMAC
										// check will fail and we generate an
										// update_fail_malformed_htlc instead of an
										// update_fail_htlc as we do when we reject a payment.
										let mut msg_ser = update_add.encode();
										msg_ser[1000] ^= 0xff;
										let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
										dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
									}
								}
								for update_fulfill in update_fulfill_htlcs {
									dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill);
								}
								for update_fail in update_fail_htlcs {
									dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail);
								}
								for update_fail_malformed in update_fail_malformed_htlcs {
									dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed);
								}
								dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
							}
						}
					},
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
							}
						}
					},
					events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
						for dest in nodes.iter() {
							if dest.get_our_node_id() == *node_id {
								dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
							}
						}
					},
					events::MessageSendEvent::SendFundingLocked { .. } => {
						// Can be generated as a reestablish response
					},
					events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
						// Can be generated due to a payment forward being rejected due to a
						// channel having previously failed a monitor update
					},
					events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {
						// Can be generated at any processing step to send back an error, disconnect
						// peer or just ignore
					},
					_ => panic!("Unhandled message event"),
				}
			}
		} }
	}

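	// On disconnect, drops node 1's in-flight messages to the disconnected counterparty while
	// queueing those destined for the still-connected peer in ba_events/bc_events.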
	macro_rules! drain_msg_events_on_disconnect {
		($counterparty_id: expr) => { {
			if $counterparty_id == 0 {
				for event in nodes[0].get_and_clear_pending_msg_events() {
					match event {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendFundingLocked { .. } => {},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
						_ => panic!("Unhandled message event"),
					}
				}
				ba_events.clear();
			} else {
				for event in nodes[2].get_and_clear_pending_msg_events() {
					match event {
						events::MessageSendEvent::UpdateHTLCs { .. } => {},
						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
						events::MessageSendEvent::SendChannelReestablish { .. } => {},
						events::MessageSendEvent::SendFundingLocked { .. } => {},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
						_ => panic!("Unhandled message event"),
					}
				}
				bc_events.clear();
			}
			let mut events = nodes[1].get_and_clear_pending_msg_events();
			let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
			let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
			for event in events.drain(..) {
				let push = match event {
					events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => *node_id != drop_node_id,
					events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => *node_id != drop_node_id,
					events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => *node_id != drop_node_id,
					events::MessageSendEvent::SendFundingLocked { .. } => false,
					events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
					events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
					_ => panic!("Unhandled message event"),
				};
				if push { msg_sink.push(event); }
			}
		} }
	}

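	// Drains $node's pending events, claiming or failing received payments (depending on $fail)
	// and processing any pending HTLC forwards.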
	macro_rules! process_events {
		($node: expr, $fail: expr) => { {
			// In case we get 256 payments we may have a hash collision, resulting in the
			// second claim/fail call not finding the duplicate-hash HTLC, so we have to
			// deduplicate the calls here.
			let mut claim_set = HashSet::new();
			let mut events = nodes[$node].get_and_clear_pending_events();
			// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
			// case where we first process a PendingHTLCsForwardable, then claim/fail on a
			// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
			// PaymentReceived event for the second HTLC in our pending_events (and breaking
			// our claim_set deduplication).
			events.sort_by(|a, b| {
				if let events::Event::PaymentReceived { .. } = a {
					if let events::Event::PendingHTLCsForwardable { .. } = b {
						Ordering::Less
					} else { Ordering::Equal }
				} else if let events::Event::PendingHTLCsForwardable { .. } = a {
					if let events::Event::PaymentReceived { .. } = b {
						Ordering::Greater
					} else { Ordering::Equal }
				} else { Ordering::Equal }
			});
			for event in events.drain(..) {
				match event {
					events::Event::PaymentReceived { payment_hash, .. } => {
						if claim_set.insert(payment_hash.0) {
							if $fail {
								assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
							} else {
								assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0), 5_000_000));
							}
						}
					},
					events::Event::PaymentSent { .. } => {},
					events::Event::PaymentFailed { .. } => {},
					events::Event::PendingHTLCsForwardable { .. } => {
						nodes[$node].process_pending_htlc_forwards();
					},
					_ => panic!("Unhandled event"),
				}
			}
		} }
	}

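	// Main fuzz loop: each input byte selects one action to apply to the network.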
	loop {
		match get_slice!(1)[0] {
			0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
			0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
			0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
			0x06 => { unsafe { IN_RESTORE = true }; nodes[0].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
			0x07 => { unsafe { IN_RESTORE = true }; nodes[1].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
			0x08 => { unsafe { IN_RESTORE = true }; nodes[2].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
			0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
			0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
			0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
			0x0c => send_payment!(nodes[2], (&nodes[1], chan_b)),
			0x0d => send_payment!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
			0x0e => send_payment!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
			0x0f => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
			},
			0x10 => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
			},
			0x11 => {
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id());
					nodes[1].peer_connected(&nodes[0].get_our_node_id());
					chan_a_disconnected = false;
				}
			},
			0x12 => {
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id());
					nodes[2].peer_connected(&nodes[1].get_our_node_id());
					chan_b_disconnected = false;
				}
			},
			0x13 => process_msg_events!(0, true),
			0x14 => process_msg_events!(0, false),
			0x15 => process_events!(0, true),
			0x16 => process_events!(0, false),
			0x17 => process_msg_events!(1, true),
			0x18 => process_msg_events!(1, false),
			0x19 => process_events!(1, true),
			0x1a => process_events!(1, false),
			0x1b => process_msg_events!(2, true),
			0x1c => process_msg_events!(2, false),
			0x1d => process_events!(2, true),
			0x1e => process_events!(2, false),
			0x1f => {
				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a);
				node_a = Arc::new(new_node_a);
				nodes[0] = node_a.clone();
				monitor_a = new_monitor_a;
			},
			0x20 => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();
				}
				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();
				}
				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b);
				node_b = Arc::new(new_node_b);
				nodes[1] = node_b.clone();
				monitor_b = new_monitor_b;
			},
			0x21 => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c);
				node_c = Arc::new(new_node_c);
				nodes[2] = node_c.clone();
				monitor_c = new_monitor_c;
			},
			_ => test_return!(),
		}

		// Re-serialize a node whenever a monitor update has succeeded since the last
		// serialization, so a later reload_node starts from a state consistent with its monitors.
		if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
			node_a_ser.0.clear();
			nodes[0].write(&mut node_a_ser).unwrap();
			monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
			*monitor_a.latest_updates_good_at_last_ser.lock().unwrap() = monitor_a.latest_update_good.lock().unwrap().clone();
		}
		if monitor_b.should_update_manager.load(atomic::Ordering::Relaxed) {
			node_b_ser.0.clear();
			nodes[1].write(&mut node_b_ser).unwrap();
			monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
			*monitor_b.latest_updates_good_at_last_ser.lock().unwrap() = monitor_b.latest_update_good.lock().unwrap().clone();
		}
		if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
			node_c_ser.0.clear();
			nodes[2].write(&mut node_c_ser).unwrap();
			monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
			*monitor_c.latest_updates_good_at_last_ser.lock().unwrap() = monitor_c.latest_update_good.lock().unwrap().clone();
		}
	}
}

#[no_mangle]
pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
}

#[cfg(test)]
mod tests {
	#[test]
	fn duplicate_crash() {
		super::do_test(&::hex::decode("00").unwrap());
	}
}