X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Ffunctional_tests.rs;h=6580abbc6dd716c22135175be7fccd57c477bedd;hb=e2a9ed7265680cc821f29860264c7f4d53e044b9;hp=60c1c7bd3fc6c8a9ba871c52f65a3c482229d4e1;hpb=bf26056c8a7d44875e50364135a59410192c2fe1;p=rust-lightning

diff --git a/src/ln/functional_tests.rs b/src/ln/functional_tests.rs
index 60c1c7bd..6580abbc 100644
--- a/src/ln/functional_tests.rs
+++ b/src/ln/functional_tests.rs
@@ -21,7 +21,8 @@ use util::ser::{Writeable, ReadableArgs};
 use util::config::UserConfig;
 use util::rng;
 
-use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
+use bitcoin::util::hash::BitcoinHash;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::util::bip143;
 use bitcoin::util::address::Address;
 use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey};
@@ -42,7 +43,6 @@ use std::collections::{BTreeSet, HashMap, HashSet};
 use std::default::Default;
 use std::sync::Arc;
 use std::sync::atomic::Ordering;
-use std::time::Instant;
 use std::mem;
 
 use ln::functional_test_utils::*;
@@ -1449,6 +1449,153 @@ fn channel_reserve_test() {
 	do_channel_reserve_test(true);
 }
 
+#[test]
+fn channel_reserve_in_flight_removes() {
+	// In cases where one side claims an HTLC, it thinks it has additional available funds that it
+	// can send to its counterparty, but due to update ordering, the other side may not yet have
+	// considered those HTLCs fully removed.
+	// This tests that we don't count HTLCs which will not be included in the next remote
+	// commitment transaction towards the reserve value (as it implies no commitment transaction
+	// will be generated which violates the remote reserve value).
+	// This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
+	// To test this we:
+	// * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
+	//   you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
+	//   you only consider the value of the first HTLC, it may not),
+	// * start routing a third HTLC from A to B,
+	// * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
+	//   the other claim in its holding cell, as it immediately goes into AwaitingRAA),
+	// * deliver the first fulfill from B
+	// * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
+	//   claim,
+	// * deliver A's response CS and RAA.
+	//   This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
+	//   removed it fully. B now has the push_msat plus the first two HTLCs in value.
+	// * Now B happily sends another HTLC, potentially violating its reserve value from A's point
+	//   of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
+	let mut nodes = create_network(2);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
+	// Route the first two HTLCs.
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
+	let (payment_preimage_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
+
+	// Start routing the third HTLC (this is just used to get everyone in the right state).
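+	// (Note that send_1 is not delivered to B until after B has claimed the first two HTLCs
+	// below, which is what interleaves the two directions of updates.)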
+	let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+	let send_1 = {
+		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+		nodes[0].node.send_payment(route, payment_hash_3).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+
+	// Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
+	// initial fulfill/CS.
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+	let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	// This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
+	// remove the second HTLC when we send the HTLC back from B to A.
+	assert!(nodes[1].node.claim_funds(payment_preimage_2));
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	expect_payment_sent!(nodes[0], payment_preimage_1);
+
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]).unwrap();
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	// B is already AwaitingRAA, so can't generate a CS here
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	// The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
+	// RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
+	// However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
+	// can no longer broadcast a commitment transaction with it and B has the preimage so can go
+	// on-chain as necessary).
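+	// This asymmetry (B considers the HTLC gone while A still tracks it in
+	// AwaitingRemovedRemoteRevoke) is exactly the window the rest of the test exercises.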
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	expect_payment_sent!(nodes[0], payment_preimage_2);
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_3, 100000);
+
+	// Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
+	// resolve the second HTLC from A's point of view.
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	// Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
+	// to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
+	let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[1]);
+	let send_2 = {
+		let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 10000, TEST_FINAL_CLTV).unwrap();
+		nodes[1].node.send_payment(route, payment_hash_4).unwrap();
+		check_added_monitors!(nodes[1], 1);
+		let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+	// Now just resolve all the outstanding messages/HTLCs for completeness...
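+	// The remaining commitment_signed/RAA exchanges flush as_cs and send_2 so that payment 4 is
+	// received, after which payments 3 and 4 are claimed to clean up.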
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	expect_pending_htlcs_forwardable!(nodes[0]);
+	expect_payment_received!(nodes[0], payment_hash_4, 10000);
+
+	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+}
+
 #[test]
 fn channel_monitor_network_test() {
 	// Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -2038,7 +2185,7 @@ fn test_htlc_on_chain_timeout() {
 	// Broadcast legit commitment tx from C on B's chain
 	let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 	check_spends!(commitment_tx[0], chan_2.3.clone());
-	nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
+	nodes[2].node.fail_htlc_backwards(&payment_hash);
 	check_added_monitors!(nodes[2], 0);
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
@@ -2219,7 +2366,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	let (_, second_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 	let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
-	assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash, 0));
+	assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -2232,7 +2379,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
 	// Drop the last RAA from 3 -> 2
 
-	assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash, 0));
+	assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -2249,7 +2396,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
 	check_added_monitors!(nodes[2], 1);
 
-	assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash, 0));
+	assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -2312,7 +2459,6 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 			_ => panic!("Unexpected event"),
 		};
 	}
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
 	nodes[1].node.process_pending_htlc_forwards();
 	check_added_monitors!(nodes[1], 1);
 
@@ -2486,7 +2632,7 @@ fn test_force_close_fail_back() {
 	// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
 	{
 		let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
-		monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
+		monitors.get_mut(&OutPoint::new(Sha256dHash::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), 0)).unwrap()
 			.provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
 	}
 	nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
@@ -2665,7 +2811,6 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
 	nodes[1].node.process_pending_htlc_forwards();
 
 	let events_2 = nodes[1].node.get_and_clear_pending_events();
@@ -3018,7 +3163,7 @@ fn test_invalid_channel_announcement() {
 
 	macro_rules! sign_msg {
 		($unsigned_msg: expr) => {
-			let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
+			let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
 			let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
 			let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
 			let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
@@ -3045,7 +3190,7 @@ fn test_invalid_channel_announcement() {
 	assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
 
 	let mut unsigned_msg = dummy_unsigned_msg!();
-	unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
+	unsigned_msg.chain_hash = Sha256dHash::hash(&[1,2,3,4,5,6,7,8,9]);
 	sign_msg!(unsigned_msg);
 	assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
 }
@@ -3062,7 +3207,7 @@ fn test_no_txn_manager_serialize_deserialize() {
 
 	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
 	nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
-	nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
+	nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
 	let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
 	let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
 	assert!(chan_0_monitor_read.is_empty());
@@ -3128,7 +3273,7 @@ fn test_simple_manager_serialize_deserialize() {
 
 	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
 	nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
-	nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
+	nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
 	let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
 	let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
 	assert!(chan_0_monitor_read.is_empty());
@@ -3188,7 +3333,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 		node_0_monitors_serialized.push(writer.0);
 	}
 
-	nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
+	nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
 	let mut node_0_monitors = Vec::new();
 	for serialized in node_0_monitors_serialized.iter() {
 		let mut read = &serialized[..];
@@ -3267,7 +3412,7 @@ macro_rules! check_spendable_outputs {
 					};
 					let secp_ctx = Secp256k1::new();
 					let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key);
-					let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey();
+					let witness_script = Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
 					let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
 					let remotesig = secp_ctx.sign(&sighash, key);
 					spend_tx.input[0].witness.push(remotesig.serialize_der().to_vec());
@@ -3322,7 +3467,7 @@ macro_rules! check_spendable_outputs {
 					let secret = {
 						match ExtendedPrivKey::new_master(Network::Testnet, &$node.node_seed) {
 							Ok(master_key) => {
-								match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx)) {
+								match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx).expect("key space exhausted")) {
 									Ok(key) => key,
 									Err(_) => panic!("Your RNG is busted"),
 								}
@@ -3333,10 +3478,10 @@ macro_rules! check_spendable_outputs {
 					let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
 					let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
 					let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
-					let sig = secp_ctx.sign(&sighash, &secret.secret_key);
+					let sig = secp_ctx.sign(&sighash, &secret.private_key.key);
 					spend_tx.input[0].witness.push(sig.serialize_der().to_vec());
 					spend_tx.input[0].witness[0].push(SigHashType::All as u8);
-					spend_tx.input[0].witness.push(pubkey.serialize().to_vec());
+					spend_tx.input[0].witness.push(pubkey.key.serialize().to_vec());
 					txn.push(spend_tx);
 				},
 			}
@@ -3895,10 +4040,10 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
 	// Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
 	// Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
-	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1, ds_dust_limit*1000));
-	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3, 1000000));
-	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5, 1000000));
-	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6, ds_dust_limit*1000));
+	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1));
+	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3));
+	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
+	assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
 	check_added_monitors!(nodes[4], 0);
 	expect_pending_htlcs_forwardable!(nodes[4]);
 	check_added_monitors!(nodes[4], 1);
@@ -3911,8 +4056,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 	commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
 
 	// Fail 3rd below-dust and 7th above-dust HTLCs
-	assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2, ds_dust_limit*1000));
-	assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4, 1000000));
+	assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
+	assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
 	check_added_monitors!(nodes[5], 0);
 	expect_pending_htlcs_forwardable!(nodes[5]);
 	check_added_monitors!(nodes[5], 1);
@@ -4205,7 +4350,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
 	// actually revoked.
 	let htlc_value = if use_dust { 50000 } else { 3000000 };
 	let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
-	assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash, htlc_value));
+	assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	check_added_monitors!(nodes[1], 1);
 
@@ -4315,7 +4460,6 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case:
 	macro_rules! expect_htlc_forward {
 		($node: expr) => {{
 			expect_event!($node, Event::PendingHTLCsForwardable);
-			$node.node.channel_state.lock().unwrap().next_forward = Instant::now();
 			$node.node.process_pending_htlc_forwards();
 		}}
 	}
@@ -4456,7 +4600,7 @@ impl msgs::ChannelUpdate {
 		msgs::ChannelUpdate {
 			signature: Signature::from(FFISignature::new()),
 			contents: msgs::UnsignedChannelUpdate {
-				chain_hash: Sha256dHash::from_data(&vec![0u8][..]),
+				chain_hash: Sha256dHash::hash(&vec![0u8][..]),
 				short_channel_id: 0,
 				timestamp: 0,
 				flags: 0,
@@ -4531,7 +4675,7 @@ fn test_onion_failure() {
 		let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 		msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], NODE|2, &[0;0]);
 	}, ||{
-		nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
+		nodes[2].node.fail_htlc_backwards(&payment_hash);
 	}, true, Some(NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: false}));
 
 	// intermediate node failure
@@ -4549,7 +4693,7 @@ fn test_onion_failure() {
 		let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 		msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|2, &[0;0]);
 	}, ||{
-		nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
+		nodes[2].node.fail_htlc_backwards(&payment_hash);
 	}, false, Some(PERM|NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: true}));
 
 	// intermediate node failure
@@ -4560,7 +4704,7 @@ fn test_onion_failure() {
 		let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 		msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|NODE|3, &[0;0]);
 	}, ||{
-		nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
+		nodes[2].node.fail_htlc_backwards(&payment_hash);
 	}, true, Some(PERM|NODE|3), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[0].pubkey, is_permanent: true}));
 
 	// final node failure
@@ -4569,7 +4713,7 @@ fn test_onion_failure() {
 		let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 		msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|3, &[0;0]);
 	}, ||{
-		nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
+		nodes[2].node.fail_htlc_backwards(&payment_hash);
 	}, false, Some(PERM|NODE|3), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: true}));
 
 	run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true,
@@ -4636,7 +4780,7 @@ fn test_onion_failure() {
 	}, ||{}, true, Some(UPDATE|14), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
 
 	run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, |_| {}, || {
-		nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
+		nodes[2].node.fail_htlc_backwards(&payment_hash);
 	}, false, Some(PERM|15), None);
 
 	run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, |msg| {
@@ -4806,7 +4950,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
 	for i in 0..max_accepted_htlcs {
 		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
 		let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
-		let mut payment_event = {
+		let payment_event = {
 			nodes[0].node.send_payment(route, our_payment_hash).unwrap();
 			check_added_monitors!(nodes[0], 1);
 
@@ -5052,3 +5196,303 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	assert!(nodes[1].node.list_channels().is_empty());
 	check_closed_broadcast!(nodes[1]);
 }
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
+	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
+
+	let mut nodes = create_network(2);
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+	let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+
+	let update_msg = msgs::UpdateFulfillHTLC{
+		channel_id: chan.2,
+		htlc_id: 0,
+		payment_preimage: our_payment_preimage,
+	};
+
+	let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
+
+	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+		assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
+	} else {
+		assert!(false);
+	}
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
+	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
+
+	let mut nodes = create_network(2);
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+
+	let update_msg = msgs::UpdateFailHTLC{
+		channel_id: chan.2,
+		htlc_id: 0,
+		reason: msgs::OnionErrorPacket { data: Vec::new()},
+	};
+
+	let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
+
+	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+		assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
+	} else {
+		assert!(false);
+	}
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
+	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
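+	// Same flow as the two tests above, but the premature message is an update_fail_malformed_htlc.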
+
+	let mut nodes = create_network(2);
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+
+	let update_msg = msgs::UpdateFailMalformedHTLC{
+		channel_id: chan.2,
+		htlc_id: 0,
+		sha256_of_onion: [1; 32],
+		failure_code: 0x8000,
+	};
+
+	let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
+
+	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+		assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
+	} else {
+		assert!(false);
+	}
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
+	//BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
+
+	let nodes = create_network(2);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
+
+	nodes[1].node.claim_funds(our_payment_preimage);
+	check_added_monitors!(nodes[1], 1);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
+		match events[0] {
+			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+				update_fulfill_htlcs[0].clone()
+			},
+			_ => panic!("Unexpected event"),
+		}
+	};
+
+	update_fulfill_msg.htlc_id = 1;
+
+	let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
+	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+		assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find");
+	} else {
+		assert!(false);
+	}
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
+	//BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
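+	// As in the previous test, B's legitimate update_fulfill_htlc is tampered with before A
+	// handles it; here the preimage is replaced rather than the htlc_id.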
+
+	let nodes = create_network(2);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
+
+	nodes[1].node.claim_funds(our_payment_preimage);
+	check_added_monitors!(nodes[1], 1);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
+		match events[0] {
+			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+				update_fulfill_htlcs[0].clone()
+			},
+			_ => panic!("Unexpected event"),
+		}
+	};
+
+	update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
+
+	let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
+	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+		assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage");
+	} else {
+		assert!(false);
+	}
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0]);
+}
+
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
+	//BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
+
+	let mut nodes = create_network(2);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
+
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+
+	let mut update_msg: msgs::UpdateFailMalformedHTLC = {
+		match events[0] {
+			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert!(update_fulfill_htlcs.is_empty());
+				assert!(update_fail_htlcs.is_empty());
+				assert_eq!(update_fail_malformed_htlcs.len(), 1);
+				assert!(update_fee.is_none());
+				update_fail_malformed_htlcs[0].clone()
+			},
+			_ => panic!("Unexpected event"),
+		}
+	};
+	update_msg.failure_code &= !0x8000;
+	let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
+	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
+		assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set");
+	} else {
+		assert!(false);
+	}
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
+	//BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
+	// * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
+
+	let mut nodes = create_network(3);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
+
+	let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+	//First hop
+	let mut payment_event = {
+		nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_2.len(), 1);
+	check_added_monitors!(nodes[1], 1);
+	payment_event = SendEvent::from_event(events_2.remove(0));
+	assert_eq!(payment_event.msgs.len(), 1);
+
+	//Second Hop
+	payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
+	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	check_added_monitors!(nodes[2], 0);
+	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
+
+	let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_3.len(), 1);
+	let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
+		match events_3[0] {
+			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert!(update_fulfill_htlcs.is_empty());
+				assert!(update_fail_htlcs.is_empty());
+				assert_eq!(update_fail_malformed_htlcs.len(), 1);
+				assert!(update_fee.is_none());
+				(update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
+			},
+			_ => panic!("Unexpected event"),
+		}
+	};
+
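+	// Node 1 should now fail the HTLC back to node 0 with an update_fail_htlc (per the BOLT 2
+	// requirement above, using the failure_code given and the sha256_of_onion as the data).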
+	nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0).unwrap();
+
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_4.len(), 1);
+
+	//Confirm that handling the update_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
+	match events_4[0] {
+		MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+			assert!(update_add_htlcs.is_empty());
+			assert!(update_fulfill_htlcs.is_empty());
+			assert_eq!(update_fail_htlcs.len(), 1);
+			assert!(update_fail_malformed_htlcs.is_empty());
+			assert!(update_fee.is_none());
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	check_added_monitors!(nodes[1], 1);
+}