X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;fp=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=5bb98bb613f6dd67fbc271146780b99037378dd3;hb=53052234959a3c0a7323a194c351c4ba9b46339b;hp=044c1673011dd66d81112fe7c673a9aaf19287a0;hpb=88fef649b15fa030cb91de76d58346a0bc408834;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 044c1673..5bb98bb6 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -70,10 +70,10 @@ fn test_insane_channel_opens() {
 	// that supposedly makes the channel open message insane
 	let insane_open_helper = |expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
 		match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) {
-			Err(msgs::HandleError{ err: error_str, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) => {
-				assert_eq!(error_str, expected_error_str, "unexpected HandleError string (expected `{}`, actual `{}`)", expected_error_str, error_str)
+			Err(msgs::LightningError{ err: error_str, action: msgs::ErrorAction::SendErrorMessage {..}}) => {
+				assert_eq!(error_str, expected_error_str, "unexpected LightningError string (expected `{}`, actual `{}`)", expected_error_str, error_str)
 			},
-			Err(msgs::HandleError{..}) => {panic!("unexpected HandleError action")},
+			Err(msgs::LightningError{..}) => {panic!("unexpected LightningError action")},
 			_ => panic!("insane OpenChannel message was somehow Ok"),
 		}
 	};
@@ -952,7 +952,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 	// transaction.
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-	if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) =
+	if let Err(msgs::LightningError{action: msgs::ErrorAction::SendErrorMessage{msg}, ..}) =
 		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
 		nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
 		let msgs::ErrorMessage {ref channel_id, ..} = msg;
@@ -1245,8 +1245,75 @@ fn duplicate_htlc_test() {
 	claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
 }
 
+#[test]
+fn test_duplicate_htlc_different_direction_onchain() {
+	// Test that ChannelMonitor doesn't generate 2 preimage txn
+	// when we have 2 HTLCs with same preimage that go across a node
+	// in opposite directions.
+	let nodes = create_network(2, &[None, None]);
+
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+	// balancing
+	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+	let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
+
+	let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800_000, TEST_FINAL_CLTV).unwrap();
+	send_along_route_with_hash(&nodes[1], route, &vec!(&nodes[0])[..], 800_000, payment_hash);
+
+	// Provide preimage to node 0 by claiming payment
+	nodes[0].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[0], 1);
+
+	// Broadcast node 1 commitment txn
+	let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+
+	assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
+	let mut has_both_htlcs = 0; // check htlcs match ones committed
+	for outp in remote_txn[0].output.iter() {
+		if outp.value == 800_000 / 1000 {
+			has_both_htlcs += 1;
+		} else if outp.value == 900_000 / 1000 {
+			has_both_htlcs += 1;
+		}
+	}
+	assert_eq!(has_both_htlcs, 2);
+
+	let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+	nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
+
+	// Check we only broadcast 1 timeout tx
+	let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+	let htlc_pair = if claim_txn[0].output[0].value == 800_000 / 1000 { (claim_txn[0].clone(), claim_txn[1].clone()) } else { (claim_txn[1].clone(), claim_txn[0].clone()) };
+	assert_eq!(claim_txn.len(), 6);
+	assert_eq!(htlc_pair.0.input.len(), 1);
+	assert_eq!(htlc_pair.0.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
+	check_spends!(htlc_pair.0, remote_txn[0].clone());
+	assert_eq!(htlc_pair.1.input.len(), 1);
+	assert_eq!(htlc_pair.1.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
+	check_spends!(htlc_pair.1, remote_txn[0].clone());
+
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 2);
+	for e in events {
+		match e {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert!(update_fail_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}
+
 fn do_channel_reserve_test(test_recv: bool) {
-	use ln::msgs::HandleError;
+	use ln::msgs::LightningError;
 
 	let mut nodes = create_network(3, &[None, None, None]);
 	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001, LocalFeatures::new(), LocalFeatures::new());
@@ -1382,7 +1449,7 @@ fn do_channel_reserve_test(test_recv: bool) {
 		if test_recv {
 			let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
 			match err {
-				HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+				LightningError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
 			}
 			// If we send a garbage message, the channel should get closed, making the rest of this test case fail.
 			assert_eq!(nodes[1].node.list_channels().len(), 1);
@@ -3455,7 +3522,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id());
 	let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id());
-	if let Err(msgs::HandleError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
+	if let Err(msgs::LightningError { action: msgs::ErrorAction::SendErrorMessage { msg }, .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
 		assert_eq!(msg.channel_id, channel_id);
 	} else { panic!("Unexpected result"); }
 }
@@ -5101,7 +5168,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
 	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 	updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
 	let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote side tried to send less than our minimum HTLC value");
 	} else {
 		assert!(false);
@@ -5127,7 +5194,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
 	updates.update_add_htlcs[0].amount_msat = 5000000-their_channel_reserve+1;
 	let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote HTLC add would put them over their reserve value");
 	} else {
 		assert!(false);
@@ -5174,7 +5241,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 	msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
 	let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote tried to push more than our max accepted HTLCs");
 	} else {
 		assert!(false);
@@ -5197,7 +5264,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
 	updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).their_max_htlc_value_in_flight_msat + 1;
 	let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err,"Remote HTLC add would put them over our max HTLC value");
 	} else {
 		assert!(false);
@@ -5220,7 +5287,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
 	updates.update_add_htlcs[0].cltv_expiry = 500000000;
 	let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height");
 	} else {
 		assert!(false);
@@ -5266,7 +5333,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
 	let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote skipped HTLC ID");
 	} else {
 		assert!(false);
@@ -5298,7 +5365,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 
 	let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
 	} else {
 		assert!(false);
@@ -5330,7 +5397,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 
 	let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
 	} else {
 		assert!(false);
@@ -5363,7 +5430,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
 
 	let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
 	} else {
 		assert!(false);
@@ -5404,7 +5471,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
 	update_fulfill_msg.htlc_id = 1;
 
 	let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find");
 	} else {
 		assert!(false);
@@ -5445,7 +5512,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 	update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
 
 	let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage");
 	} else {
 		assert!(false);
@@ -5491,7 +5558,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
 	};
 	update_msg.failure_code &= !0x8000;
 	let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
 		assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set");
 	} else {
 		assert!(false);
@@ -5864,14 +5931,12 @@ fn test_upfront_shutdown_script() {
 	node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
 	// Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that we disconnect peer
 	if let Err(error) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {
-		if let Some(error) = error.action {
-			match error {
-				ErrorAction::SendErrorMessage { msg } => {
-					assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
-				},
-				_ => { assert!(false); }
-			}
-		} else { assert!(false); }
+		match error.action {
+			ErrorAction::SendErrorMessage { msg } => {
+				assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
+			},
+			_ => { assert!(false); }
+		}
 	} else { assert!(false); }
 	let events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -5981,14 +6046,12 @@ fn test_user_configurable_csv_delay() {
 	let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 	accept_channel.to_self_delay = 200;
 	if let Err(error) = nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel) {
-		if let Some(error) = error.action {
-			match error {
-				ErrorAction::SendErrorMessage { msg } => {
-					assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
-				},
-				_ => { assert!(false); }
-			}
-		} else { assert!(false); }
+		match error.action {
+			ErrorAction::SendErrorMessage { msg } => {
+				assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
+			},
+			_ => { assert!(false); }
+		}
 	} else { assert!(false); }
 
 	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
@@ -6056,14 +6119,12 @@ fn test_data_loss_protect() {
 
 	// Check we update monitor following learning of per_commitment_point from B
 	if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]) {
-		if let Some(error) = err.action {
-			match error {
-				ErrorAction::SendErrorMessage { msg } => {
-					assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
-				},
-				_ => panic!("Unexpected event!"),
-			}
-		} else { assert!(false); }
+		match err.action {
+			ErrorAction::SendErrorMessage { msg } => {
+				assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
+			},
+			_ => panic!("Unexpected event!"),
+		}
 	} else { assert!(false); }
 	check_added_monitors!(nodes[0], 1);
 
@@ -6085,13 +6146,11 @@ fn test_data_loss_protect() {
 
 	// Check we close channel detecting A is fallen-behind
 	if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) {
-		if let Some(error) = err.action {
-			match error {
-				ErrorAction::SendErrorMessage { msg } => {
-					assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
-				_ => panic!("Unexpected event!"),
-			}
-		} else { assert!(false); }
+		match err.action {
+			ErrorAction::SendErrorMessage { msg } => {
+				assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
+			_ => panic!("Unexpected event!"),
+		}
 	} else { assert!(false); }
 	let events = nodes[1].node.get_and_clear_pending_msg_events();