X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fquery.rs;h=bb8fd7d43db725c7f95ec275535c76b496ba2ce4;hb=bb4b86c8178faa74bbbfd20e91626bf9372424c6;hp=37fbaefcc9d969ca50cc2b9b75b9dd8a575c453a;hpb=b2791c191a941208eff825a44440fbf605dcfb60;p=dnssec-prover

diff --git a/src/query.rs b/src/query.rs
index 37fbaef..bb8fd7d 100644
--- a/src/query.rs
+++ b/src/query.rs
@@ -34,7 +34,8 @@ pub struct QueryBuf {
 	len: u16,
 }
 impl QueryBuf {
-	fn new_zeroed(len: u16) -> Self {
+	/// Generates a new buffer of the given length, consisting of all zeros.
+	pub fn new_zeroed(len: u16) -> Self {
 		let heap_buf = if len > STACK_BUF_LIMIT { vec![0; len as usize] } else { Vec::new() };
 		Self {
 			buf: [0; STACK_BUF_LIMIT as usize],
@@ -42,7 +43,11 @@ impl QueryBuf {
 			len
 		}
 	}
-	pub(crate) fn extend_from_slice(&mut self, sl: &[u8]) {
+	/// Extends the size of this buffer by appending the given slice.
+	///
+	/// If the total length of this buffer exceeds [`u16::MAX`] after appending, the buffer's state
+	/// is undefined, however pushing data beyond [`u16::MAX`] will not panic.
+	pub fn extend_from_slice(&mut self, sl: &[u8]) {
 		let new_len = self.len.saturating_add(sl.len() as u16);
 		let was_heap = self.len > STACK_BUF_LIMIT;
 		let is_heap = new_len > STACK_BUF_LIMIT;
@@ -59,6 +64,14 @@ impl QueryBuf {
 		target.copy_from_slice(sl);
 		self.len = new_len;
 	}
+	/// Converts this query into its bytes on the heap
+	pub fn into_vec(self) -> Vec<u8> {
+		if self.len > STACK_BUF_LIMIT {
+			self.heap_buf
+		} else {
+			self.buf[..self.len as usize].to_vec()
+		}
+	}
 }
 impl ops::Deref for QueryBuf {
 	type Target = [u8];
@@ -80,14 +93,12 @@ impl ops::DerefMut for QueryBuf {
 	}
 }
 
-// We don't care about transaction IDs as we're only going to accept signed data. Thus, we use
-// this constant instead of a random value.
-const TXID: u16 = 0x4242;
+// We don't care about transaction IDs as we're only going to accept signed data.
+// Further, if we're querying over DoH, the RFC says we SHOULD use a transaction ID of 0 here.
+const TXID: u16 = 0;
 
 fn build_query(domain: &Name, ty: u16) -> QueryBuf {
 	let mut query = QueryBuf::new_zeroed(0);
-	let query_msg_len: u16 = 2 + 2 + 8 + 2 + 2 + name_len(domain) + 11;
-	query.extend_from_slice(&query_msg_len.to_be_bytes());
 	query.extend_from_slice(&TXID.to_be_bytes());
 	query.extend_from_slice(&[0x01, 0x20]); // Flags: Recursive, Authenticated Data
 	query.extend_from_slice(&[0, 1, 0, 0, 0, 0, 0, 1]); // One question, One additional
@@ -172,6 +183,11 @@ const MAX_REQUESTS: usize = 10;
 /// [`ProofBuilder::process_response`] should be called, and each fresh query returned should be
 /// sent to the resolver. Once [`ProofBuilder::awaiting_responses`] returns false,
 /// [`ProofBuilder::finish_proof`] should be called to fetch the resulting proof.
+///
+/// To build a DNSSEC proof using a DoH server, take each [`QueryBuf`], encode it as base64url, and
+/// make a query to `https://doh-server/endpoint?dns=base64url_encoded_query` with an `Accept`
+/// header of `application/dns-message`. Each response, in raw binary, can be fed directly into
+/// [`ProofBuilder::process_response`].
 pub struct ProofBuilder {
 	proof: Vec<u8>,
 	min_ttl: u32,
@@ -258,12 +274,14 @@ impl ProofBuilder {
 
 #[cfg(feature = "std")]
 fn send_query(stream: &mut TcpStream, query: &[u8]) -> Result<(), Error> {
+	stream.write_all(&(query.len() as u16).to_be_bytes())?;
 	stream.write_all(&query)?;
 	Ok(())
 }
 
 #[cfg(feature = "tokio")]
 async fn send_query_async(stream: &mut TokioTcpStream, query: &[u8]) -> Result<(), Error> {
+	stream.write_all(&(query.len() as u16).to_be_bytes()).await?;
 	stream.write_all(&query).await?;
 	Ok(())
 }
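
For reference, a minimal sketch of the DoH flow described in the new `ProofBuilder` documentation above. It assumes the `base64` and `reqwest` (blocking) crates, a placeholder DoH endpoint, and that `QueryBuf` is importable as `dnssec_prover::query::QueryBuf`; the exact `ProofBuilder` method signatures are not part of this diff, so the final hand-off is only noted in a comment.

// Sketch only: turn one QueryBuf into a DoH GET request as described in the new
// ProofBuilder docs. The endpoint URL is a placeholder.
use base64::Engine as _;

fn fetch_doh_response(query: dnssec_prover::query::QueryBuf) -> Result<Vec<u8>, reqwest::Error> {
	// RFC 8484 requires the `dns` parameter to be base64url-encoded without padding.
	let encoded = base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(query.into_vec());
	let url = format!("https://doh.example/dns-query?dns={}", encoded);
	let resp = reqwest::blocking::Client::new()
		.get(&url)
		.header("Accept", "application/dns-message")
		.send()?
		.error_for_status()?;
	// The raw response bytes can be copied into a fresh QueryBuf (new_zeroed(0) plus
	// extend_from_slice) and handed to ProofBuilder::process_response.
	Ok(resp.bytes()?.to_vec())
}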
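
The send_query/send_query_async change moves the RFC 1035 two-byte TCP length prefix out of build_query and into the send path. As a hedged sketch (not taken from the crate), the matching read side could collect a length-prefixed response into a QueryBuf using the methods made public above, again assuming the `dnssec_prover::query::QueryBuf` path.

// Sketch only: read one length-prefixed DNS-over-TCP response (RFC 1035 section 4.2.2)
// into a QueryBuf. Error handling is minimal.
use std::io::Read;
use std::net::TcpStream;

fn read_tcp_response(stream: &mut TcpStream) -> std::io::Result<dnssec_prover::query::QueryBuf> {
	let mut len_bytes = [0u8; 2];
	stream.read_exact(&mut len_bytes)?;
	let len = u16::from_be_bytes(len_bytes);

	// new_zeroed only allocates on the heap when the response exceeds the stack-buffer
	// limit; the buffer is then filled in place through DerefMut.
	let mut resp = dnssec_prover::query::QueryBuf::new_zeroed(len);
	stream.read_exact(&mut resp)?;
	Ok(resp)
}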