}
}
-// We don't care about transaction IDs as we're only going to accept signed data. Thus, we use
-// this constant instead of a random value.
-const TXID: u16 = 0x4242;
+// We don't care about transaction IDs as we're only going to accept signed data.
+// Further, if we're querying over DoH, RFC 8484 says we SHOULD use a transaction ID of 0 here.
+const TXID: u16 = 0;
fn build_query(domain: &Name, ty: u16) -> QueryBuf {
let mut query = QueryBuf::new_zeroed(0);
- let query_msg_len: u16 = 2 + 2 + 8 + 2 + 2 + name_len(domain) + 11;
- query.extend_from_slice(&query_msg_len.to_be_bytes());
query.extend_from_slice(&TXID.to_be_bytes());
query.extend_from_slice(&[0x01, 0x20]); // Flags: Recursive, Authenticated Data
query.extend_from_slice(&[0, 1, 0, 0, 0, 0, 0, 1]); // One question, One additional
#[cfg(feature = "std")]
fn send_query(stream: &mut TcpStream, query: &[u8]) -> Result<(), Error> {
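+ // DNS-over-TCP messages are prefixed with their length as a big-endian u16 (RFC 1035 §4.2.2).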
+ stream.write_all(&(query.len() as u16).to_be_bytes())?;
stream.write_all(&query)?;
Ok(())
}
#[cfg(feature = "tokio")]
async fn send_query_async(stream: &mut TokioTcpStream, query: &[u8]) -> Result<(), Error> {
+ stream.write_all(&(query.len() as u16).to_be_bytes()).await?;
stream.write_all(&query).await?;
Ok(())
}
[lib]
crate-type = ["cdylib", "rlib"]
+
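+# Optimize the release (wasm) build for size and speed: full LTO, single codegen unit.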
+[profile.release]
+lto = true
+codegen-units = 1
--- /dev/null
+import init from './dnssec_prover_wasm.js';
+import * as wasm from './dnssec_prover_wasm.js';
+
+/**
+* Asynchronously resolves a given domain and type using the provided DoH endpoint, then verifies
+* the returned DNSSEC data and ultimately returns a JSON-encoded list of validated records.
+*/
+export async function lookup_doh(domain, ty, doh_endpoint) {
+ await init();
+
+ if (!domain.endsWith(".")) domain += ".";
+ if (typeof(ty) == "string") {
+ var tyLower = ty.toLowerCase();
+ if (tyLower == "txt") {
+ ty = 16;
+ } else if (tyLower == "tlsa") {
+ ty = 52;
+ } else if (tyLower == "a") {
+ ty = 1;
+ } else if (tyLower == "aaaa") {
+ ty = 28;
+ }
+ }
+ if (typeof(ty) == "number") {
+ var builder = wasm.init_proof_builder(domain, ty);
+ if (builder == null) {
+ return "{\"error\":\"Bad domain\"}";
+ } else {
+ var queries_pending = 0;
+ var send_next_query;
+ send_next_query = async function() {
+ var query = wasm.get_next_query(builder);
+ if (query != null) {
+ queries_pending += 1;
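+ // Per RFC 8484, DoH GET requests carry the query base64url-encoded, with padding stripped, in the dns parameter.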
+ var b64 = btoa(String.fromCodePoint(...query));
+ var b64url = b64.replace(/\+/g, '-').replace(/\//g, '_').replace(/=/g, '');
+ try {
+ var resp = await fetch(doh_endpoint + "?dns=" + b64url,
+ {headers: {"accept": "application/dns-message"}});
+ if (!resp.ok) { throw "Query returned HTTP " + resp.status; }
+ var array = await resp.arrayBuffer();
+ var buf = new Uint8Array(array);
+ wasm.process_query_response(builder, buf);
+ queries_pending -= 1;
+ } catch (e) {
+ return "{\"error\":\"DoH Query failed: " + e + "\"}";
+ }
+ return await send_next_query();
+ } else if (queries_pending == 0) {
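+ // No more queries to send and none in flight: assemble the proof and verify it.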
+ var proof = wasm.get_unverified_proof(builder);
+ if (proof != null) {
+ var result = wasm.verify_byte_stream(proof);
+ return JSON.stringify(JSON.parse(result), null, 1);
+ } else {
+ return "{\"error\":\"Failed to build proof\"}";
+ }
+ }
+ }
+ return await send_next_query();
+ }
+ } else {
+ return "{\"error\":\"Unsupported Type\"}";
+ }
+}
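+
+// Illustrative usage (any RFC 8484 DoH endpoint should work; this one is just an example):
+// lookup_doh("example.com", "txt", "https://dns.google/dns-query").then((json) => console.log(json));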
use dnssec_prover::ser::parse_rr_stream;
use dnssec_prover::validation::{verify_rr_stream, ValidationError};
+use dnssec_prover::query::{ProofBuilder, QueryBuf};
use wasm_bindgen::prelude::wasm_bindgen;
+extern crate alloc;
+use alloc::collections::VecDeque;
+
use core::fmt::Write;
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
+#[wasm_bindgen]
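+/// Holds a [`ProofBuilder`] along with the queue of queries it has generated which have not yet
+/// been fetched via [`get_next_query`].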
+pub struct WASMProofBuilder(ProofBuilder, VecDeque<QueryBuf>);
+
+#[wasm_bindgen]
+/// Builds a proof builder which can generate a proof for records of the given `ty`pe at the given
+/// `name`.
+///
+/// After calling this, [`get_next_query`] should be called to fetch the initial query.
+pub fn init_proof_builder(mut name: String, ty: u16) -> Option<WASMProofBuilder> {
+ if !name.ends_with('.') { name.push('.'); }
+ if let Ok(qname) = name.try_into() {
+ let (builder, initial_query) = ProofBuilder::new(&qname, ty);
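+ // A proof typically needs only a handful of queries (the target record plus the DS/DNSKEY chain up to the root).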
+ let mut queries = VecDeque::with_capacity(4);
+ queries.push_back(initial_query);
+ Some(WASMProofBuilder(builder, queries))
+ } else {
+ None
+ }
+}
+
+#[wasm_bindgen]
+/// Processes a response to a query previously fetched from [`get_next_query`].
+///
+/// After calling this, [`get_next_query`] should be called repeatedly until no pending queries
+/// remain, at which point [`get_unverified_proof`] should be called.
+pub fn process_query_response(proof_builder: &mut WASMProofBuilder, response: Vec<u8>) {
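+ // Sanity-check the size first: a valid DNS message is bounded by its 16-bit length field.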
+ if response.len() < u16::MAX as usize {
+ let mut answer = QueryBuf::new_zeroed(response.len() as u16);
+ answer.copy_from_slice(&response);
+ if let Ok(queries) = proof_builder.0.process_response(&answer) {
+ for query in queries {
+ proof_builder.1.push_back(query);
+ }
+ }
+ }
+}
+
+#[wasm_bindgen]
+/// Gets the next query (if any) that should be sent to the resolver for the given proof builder.
+///
+/// Once the resolver responds [`process_query_response`] should be called with the response.
+pub fn get_next_query(proof_builder: &mut WASMProofBuilder) -> Option<Vec<u8>> {
+ if let Some(query) = proof_builder.1.pop_front() {
+ Some(query.into_vec())
+ } else {
+ None
+ }
+}
+
+#[wasm_bindgen]
+/// Gets the final, unverified, proof once all queries fetched via [`get_next_query`] have
+/// completed and their responses passed to [`process_query_response`].
+pub fn get_unverified_proof(proof_builder: WASMProofBuilder) -> Option<Vec<u8>> {
+ proof_builder.0.finish_proof().ok().map(|(proof, _ttl)| proof)
+}
+
#[wasm_bindgen]
/// Verifies an RFC 9102-formatted proof and returns the [`VerifiedRRStream`] in JSON form.
pub fn verify_byte_stream(stream: Vec<u8>) -> String {