diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 80f0d963..c3d39d8f 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -12,8 +12,7 @@ use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
-use bitcoin::network::serialize::BitcoinHash;
-use bitcoin::util::hash::Sha256dHash;
+use bitcoin::util::hash::{BitcoinHash, Sha256dHash};

use secp256k1::key::{SecretKey,PublicKey};
use secp256k1::{Secp256k1,Message};
@@ -1943,10 +1942,20 @@ impl ChannelManager {
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
+ let chan_update = self.get_channel_update(chan);
pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
- reason: ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &self.get_channel_update(chan).unwrap().encode_with_len()[..]),
+ reason: if let Ok(update) = chan_update {
+ ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &update.encode_with_len()[..])
+ } else {
+ // This can only happen if the channel isn't in the fully-funded
+ // state yet, implying our counterparty is trying to route payments
+ // over the channel back to themselves (since no one else should
+ // know the short_id is a lightning channel yet). 
We should have no + // problem just calling this unknown_next_peer + ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[]) + }, })); } } @@ -2194,8 +2203,8 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - if (msg.failure_code & 0x8000) != 0 { - return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION set", msg.channel_id)); + if (msg.failure_code & 0x8000) == 0 { + return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id)); } chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }) .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; @@ -2383,7 +2392,7 @@ impl ChannelManager { if chan.get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg) + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = chan.channel_reestablish(msg) .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; if let Some(monitor) = channel_monitor { if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { @@ -2422,6 +2431,12 @@ impl ChannelManager { send_raa!(); }, } + if let Some(msg) = shutdown { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: their_node_id.clone(), + msg, + }); + } Ok(()) }, None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) @@ -2727,6 +2742,7 @@ impl ChannelMessageHandler for ChannelManager { let short_to_id = channel_state.short_to_id; let pending_msg_events = channel_state.pending_msg_events; if no_connection_possible { + log_debug!(self, "Failing all channels with {} due to no_connection_possible", log_pubkey!(their_node_id)); channel_state.by_id.retain(|_, chan| { if chan.get_their_node_id() == *their_node_id { if let Some(short_id) = chan.get_short_channel_id() { @@ -2744,6 +2760,7 @@ impl ChannelMessageHandler for ChannelManager { } }); } else { + log_debug!(self, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id)); channel_state.by_id.retain(|_, chan| { if chan.get_their_node_id() == *their_node_id { //TODO: mark channel disabled (and maybe announce such after a timeout). 
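
The failure codes in the hunks above compose BOLT-4 flag bits with a code number: 0x1000 (UPDATE) marks failures that enclose a channel_update, so 0x1000|20 (channel_disabled) is only usable when get_channel_update() succeeds, while 0x4000 (PERM) gives the update-free fallback 0x4000|10 (unknown_next_peer). The same flag scheme motivates the corrected check above: 0x8000 (BADONION) marks onion-parse failures, and update_fail_malformed_htlc is only meaningful with that bit set. A minimal sketch of the scheme follows; the constant and function names are illustrative, not defined in this file:

	const BADONION: u16 = 0x8000; // failing peer could not parse the onion
	const PERM: u16 = 0x4000;     // permanent failure
	const UPDATE: u16 = 0x1000;   // failure message encloses a channel_update

	// Mirrors the corrected check above: reject update_fail_malformed_htlc
	// unless the BADONION bit is set.
	fn validate_malformed_code(failure_code: u16) -> Result<(), &'static str> {
		if failure_code & BADONION == 0 {
			return Err("Got update_fail_malformed_htlc with BADONION not set");
		}
		Ok(())
	}

	fn main() {
		let channel_disabled: u16 = UPDATE | 20; // 0x1000|20, sent above with our channel_update
		let unknown_next_peer: u16 = PERM | 10;  // 0x4000|10, the fallback when no update exists yet
		assert_eq!(channel_disabled, 0x1014);
		assert_eq!(unknown_next_peer, 0x400a);
		assert!(validate_malformed_code(BADONION | PERM | 4).is_ok()); // e.g. invalid_onion_version
		assert!(validate_malformed_code(channel_disabled).is_err());
	}
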
@@ -2774,6 +2791,8 @@ impl ChannelMessageHandler for ChannelManager { } fn peer_connected(&self, their_node_id: &PublicKey) { + log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id)); + let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); @@ -3214,13 +3233,11 @@ mod tests { use util::ser::{Writeable, Writer, ReadableArgs}; use util::config::UserConfig; - use bitcoin::util::hash::Sha256dHash; + use bitcoin::util::hash::{BitcoinHash, Sha256dHash}; use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; - use bitcoin::network::serialize::serialize; - use bitcoin::network::serialize::BitcoinHash; use hex; @@ -3510,7 +3527,7 @@ mod tests { tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut { value: *channel_value_satoshis, script_pubkey: output_script.clone(), }]}; - funding_output = OutPoint::new(Sha256dHash::from_data(&serialize(&tx).unwrap()[..]), 0); + funding_output = OutPoint::new(tx.txid(), 0); node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output); let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap(); @@ -3636,6 +3653,30 @@ mod tests { } } + macro_rules! get_closing_signed_broadcast { + ($node: expr, $dest_pubkey: expr) => { + { + let events = $node.get_and_clear_pending_msg_events(); + assert!(events.len() == 1 || events.len() == 2); + (match events[events.len() - 1] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + msg.clone() + }, + _ => panic!("Unexpected event"), + }, if events.len() == 2 { + match events[0] { + MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + assert_eq!(*node_id, $dest_pubkey); + Some(msg.clone()) + }, + _ => panic!("Unexpected event"), + } + } else { None }) + } + } + } + fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) { let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) }; let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) }; @@ -3667,29 +3708,6 @@ mod tests { }) }; - macro_rules! get_closing_signed_broadcast { - ($node: expr, $dest_pubkey: expr) => { - { - let events = $node.get_and_clear_pending_msg_events(); - assert!(events.len() == 1 || events.len() == 2); - (match events[events.len() - 1] { - MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - msg.clone() - }, - _ => panic!("Unexpected event"), - }, if events.len() == 2 { - match events[0] { - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - assert_eq!(*node_id, $dest_pubkey); - Some(msg.clone()) - }, - _ => panic!("Unexpected event"), - } - } else { None }) - } - } - } - node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap(); let (as_update, bs_update) = if close_inbound_first { assert!(node_a.get_and_clear_pending_msg_events().is_empty()); @@ -3758,35 +3776,41 @@ mod tests { } macro_rules! 
commitment_signed_dance { - ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => { { check_added_monitors!($node_a, 0); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); - let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id()); check_added_monitors!($node_a, 1); + commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false); + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + { + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id()); check_added_monitors!($node_b, 0); assert!($node_b.node.get_and_clear_pending_msg_events().is_empty()); $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap(); assert!($node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!($node_b, 1); $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap(); - let bs_revoke_and_ack = get_event_msg!($node_b, MessageSendEvent::SendRevokeAndACK, $node_a.node.get_our_node_id()); + let (bs_revoke_and_ack, extra_msg_option) = { + let events = $node_b.node.get_and_clear_pending_msg_events(); + assert!(events.len() <= 2); + (match events[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, $node_a.node.get_our_node_id()); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }, events.get(1).map(|e| e.clone())) + }; check_added_monitors!($node_b, 1); if $fail_backwards { assert!($node_a.node.get_and_clear_pending_events().is_empty()); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); } $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); - if $fail_backwards { - let channel_state = $node_a.node.channel_state.lock().unwrap(); - assert_eq!(channel_state.pending_msg_events.len(), 1); - if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] { - assert_ne!(*node_id, $node_b.node.get_our_node_id()); - } else { panic!("Unexpected event"); } - } else { - assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); - } { let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); if $fail_backwards { @@ -3797,6 +3821,26 @@ mod tests { } added_monitors.clear(); } + extra_msg_option + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => { + { + assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none()); + } + }; + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { + { + commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true); + if $fail_backwards { + let channel_state = $node_a.node.channel_state.lock().unwrap(); + assert_eq!(channel_state.pending_msg_events.len(), 1); + if let MessageSendEvent::UpdateHTLCs { ref node_id, .. 
} = channel_state.pending_msg_events[0] { + assert_ne!(*node_id, $node_b.node.get_our_node_id()); + } else { panic!("Unexpected event"); } + } else { + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); + } } } } @@ -4642,6 +4686,375 @@ mod tests { close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); } + #[test] + fn pre_funding_lock_shutdown_test() { + // Test sending a shutdown prior to funding_locked after funding generation + let nodes = create_network(2); + let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]); + nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]); + + nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + assert!(nodes[1].node.list_channels().is_empty()); + } + + #[test] + fn updates_shutdown_wait() { + // Test sending a shutdown with outstanding updates pending + let mut nodes = create_network(3); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap(); + let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap(); + + let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + + nodes[0].node.close_channel(&chan_1.2).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]); + if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {} + else { panic!("New sends should fail!") 
}; + if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {} + else { panic!("New sends should fail!") }; + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); + } + + #[test] + fn htlc_fail_async_shutdown() { + // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order + let mut nodes = create_network(3); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap(); + let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[0].node.send_payment(route, our_payment_hash).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + + 
nodes[1].node.close_channel(&chan_1.2).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); + + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fulfill_htlcs.is_empty()); + assert_eq!(updates_2.update_fail_htlcs.len(), 1); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, ref rejected_by_dest } => { + assert_eq!(our_payment_hash, *payment_hash); + assert!(!rejected_by_dest); + }, + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); + } + + #[test] + fn update_fee_async_shutdown() { + // Test update_fee works after shutdown start if messages are delivered out-of-order + let nodes = create_network(2); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + + let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate(); + nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_some()); + + 
nodes[1].node.close_channel(&chan_1.2).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+ // Note that we don't actually test normative behavior here. The spec indicates we could
+ // actually send a closing_signed here, but it is somewhat unclear and could possibly be amended
+ // to require waiting on the full commitment dance before doing so (see
+ // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid
+ // ambiguity, we should wait until after the full commitment dance to send closing_signed.
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+ let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true);
+
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() {
+ MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ msg
+ },
+ _ => panic!("Unexpected event"),
+ }).unwrap();
+ let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+ }
+
+ fn do_test_shutdown_rebroadcast(recv_count: u8) {
+ // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
+ // messages delivered prior to disconnect
+ let nodes = create_network(3);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+ let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+
+ nodes[1].node.close_channel(&chan_1.2).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ if recv_count > 0 {
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ if recv_count > 1 {
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ 
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap(); + let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + assert!(node_1_shutdown == node_1_2nd_shutdown); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap(); + let node_0_2nd_shutdown = if recv_count > 0 { + let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap(); + node_0_2nd_shutdown + } else { + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap(); + get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) + }; + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap(); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + if recv_count > 0 { + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_closing_signed.is_some()); + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + if recv_count == 0 { + 
// If all closing_signeds weren't delivered we can just resume where we left off...
+ let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap();
+ let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap();
+ let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
+
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap();
+ let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ assert!(node_0_closing_signed == node_0_2nd_closing_signed);
+
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap();
+ let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+ } else {
+ // If one node, however, received and responded with an identical closing_signed we end
+ // up erroring and nodes[0] will try to broadcast its own latest commitment transaction.
+ // There isn't really anything better we can easily do, but in the future we might
+ // explore storing a set of recently-closed channels that got disconnected during
+ // closing_signed and avoiding broadcasting local commitment txn for some timeout to
+ // give our counterparty enough time to (potentially) broadcast a cooperative closing
+ // transaction. 
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) = + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) { + nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg); + let msgs::ErrorMessage {ref channel_id, ..} = msg; + assert_eq!(*channel_id, chan_1.2); + } else { panic!("Needed SendErrorMessage close"); } + + // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and + // checks it, but in this case nodes[0] didn't ever get a chance to receive a + // closing_signed so we do it ourselves + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + }, + _ => panic!("Unexpected event"), + } + } + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); + } + + #[test] + fn test_shutdown_rebroadcast() { + do_test_shutdown_rebroadcast(0); + do_test_shutdown_rebroadcast(1); + do_test_shutdown_rebroadcast(2); + } + #[test] fn fake_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -5302,7 +5715,13 @@ mod tests { get_announce_close_broadcast_events(&nodes, 3, 4); assert_eq!(nodes[3].node.list_channels().len(), 0); assert_eq!(nodes[4].node.list_channels().len(), 0); + } + #[test] + fn test_justice_tx() { + // Test justice txn built on revoked HTLC-Success tx, against both sides + + let nodes = create_network(2); // Create some new channels: let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5341,6 +5760,45 @@ mod tests { test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone()); } get_announce_close_broadcast_events(&nodes, 0, 1); + + assert_eq!(nodes[0].node.list_channels().len(), 0); + assert_eq!(nodes[1].node.list_channels().len(), 0); + + // We test justice_tx build by A on B's revoked HTLC-Success tx + // Create some new channels: + let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1); + + // A pending HTLC which will be revoked: + let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + // Get the will-be-revoked local txn from B + let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid()); + assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present + // Revoke the old state + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4); + { + let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + { + let mut node_txn = 
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected + assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output + + check_spends!(node_txn[0], revoked_local_txn[0].clone()); + node_txn.swap_remove(0); + } + test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE); + + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); + header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1); + test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone()); + } + get_announce_close_broadcast_events(&nodes, 0, 1); assert_eq!(nodes[0].node.list_channels().len(), 0); assert_eq!(nodes[1].node.list_channels().len(), 0); } @@ -5761,7 +6219,7 @@ mod tests { /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas /// for claims/fails they are separated out. - fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) { + fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) { node_a.node.peer_connected(&node_b.node.get_our_node_id()); let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); node_b.node.peer_connected(&node_a.node.get_our_node_id()); @@ -5794,7 +6252,7 @@ mod tests { (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0)); for chan_msgs in resp_1.drain(..) { - if pre_all_htlcs { + if send_funding_locked.0 { node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); let announcement_event = node_a.node.get_and_clear_pending_msg_events(); if !announcement_event.is_empty() { @@ -5851,7 +6309,7 @@ mod tests { } for chan_msgs in resp_2.drain(..) 
{ - if pre_all_htlcs { + if send_funding_locked.1 { node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); let announcement_event = node_b.node.get_and_clear_pending_msg_events(); if !announcement_event.is_empty() { @@ -5915,7 +6373,7 @@ mod tests { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; @@ -5924,7 +6382,7 @@ mod tests { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; @@ -5937,7 +6395,7 @@ mod tests { claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3); fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -6018,19 +6476,19 @@ mod tests { if messages_delivered < 3 { // Even if the funding_locked messages get exchanged, as long as nothing further was // received on either side, both sides will need to resend them. - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false)); } else if messages_delivered == 3 { // nodes[0] still wants its RAA + commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false)); } else if messages_delivered == 4 { // nodes[0] still wants its commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false)); } else if messages_delivered == 5 { // nodes[1] still wants its final RAA - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true)); } else if messages_delivered == 6 { // Everything was delivered... 
- reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } let events_1 = nodes[1].node.get_and_clear_pending_events(); @@ -6042,7 +6500,7 @@ mod tests { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); nodes[1].node.process_pending_htlc_forwards(); @@ -6116,7 +6574,7 @@ mod tests { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); if messages_delivered < 2 { - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); //TODO: Deduplicate PaymentSent events, then enable this if: //if messages_delivered < 1 { let events_4 = nodes[0].node.get_and_clear_pending_events(); @@ -6130,21 +6588,21 @@ mod tests { //} } else if messages_delivered == 2 { // nodes[0] still wants its RAA + commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true)); } else if messages_delivered == 3 { // nodes[0] still wants its commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false)); } else if messages_delivered == 4 { // nodes[1] still wants its final RAA - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false)); } else if messages_delivered == 5 { // Everything was delivered... - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); // Channel should still work fine... 
let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; @@ -6185,20 +6643,28 @@ mod tests { _ => panic!("Unexpected event"), } + reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_2.len(), 1); + assert_eq!(events_2.len(), 2); match events_2[0] { MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); }, _ => panic!("Unexpected event"), } + match events_2[1] { + MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); // TODO: We shouldn't need to manually pass list_usable_chanels here once we support // rebroadcasting announcement_signatures upon reconnect. @@ -6401,7 +6867,7 @@ mod tests { if disconnect { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); @@ -6442,7 +6908,7 @@ mod tests { if disconnect { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure @@ -7002,7 +7468,7 @@ mod tests { nodes[0].node = Arc::new(nodes_0_deserialized); check_added_monitors!(nodes[0], 1); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash); claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage); @@ -7072,8 +7538,8 @@ mod tests { nodes[0].node = Arc::new(nodes_0_deserialized); // nodes[1] and nodes[2] have no lost state with nodes[0]... - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - reconnect_nodes(&nodes[0], &nodes[2], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); //... and we can even still claim the payment! 
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
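
The long run of call-site updates above follows from widening reconnect_nodes' third parameter from the single pre_all_htlcs flag into a send_funding_locked: (bool, bool) tuple, so a test can state per direction whether a funding_locked resend is expected on reconnect; the (false, true) call above exercises the case where only one side has re-confirmed the funding transaction. A minimal sketch of the calling convention, with a hypothetical stub standing in for the real harness function:

	// Hypothetical stand-in for the test-harness signature; the bodies are
	// illustrative only.
	fn reconnect_nodes_stub(send_funding_locked: (bool, bool)) {
		if send_funding_locked.0 {
			// node_a expects, and handles, a fresh funding_locked from node_b
		}
		if send_funding_locked.1 {
			// node_b expects, and handles, a fresh funding_locked from node_a
		}
	}

	fn main() {
		reconnect_nodes_stub((true, true));   // funding_locked still owed in both directions
		reconnect_nodes_stub((false, true));  // only one side re-confirmed the funding tx
		reconnect_nodes_stub((false, false)); // channel fully established before disconnect
	}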