1 //! Simple variable-time big integer implementation
4 use core::marker::PhantomData;
6 #[allow(clippy::needless_lifetimes)] // lifetimes improve readability
7 #[allow(clippy::needless_borrow)] // borrows indicate read-only/non-move
8 #[allow(clippy::too_many_arguments)] // sometimes we don't have an option
9 #[allow(clippy::identity_op)] // sometimes identities improve readability for repeated actions
11 // **************************************
12 // * Implementations of math primitives *
13 // **************************************
// Unwraps a `Result`, debug-asserting success; on `Err` it early-returns the error from the
// enclosing function (a const-friendly stand-in for `?`).
// NOTE(review): interior lines of this macro (the binding of `$v` to `v` and the `Ok(..)`
// match arm) are not visible in this excerpt.
15 macro_rules! debug_unwrap { ($v: expr) => { {
17 debug_assert!(v.is_ok());
20 Err(e) => return Err(e),
24 // Various const versions of existing slice utilities
25 /// Const version of `&a[start..end]`
26 const fn const_subslice<'a, T>(a: &'a [T], start: usize, end: usize) -> &'a [T] {
// Bounds are asserted up front so the raw-pointer arithmetic below can never leave the
// allocation; panics (like the slice-indexing it replaces) on an out-of-range request.
27 assert!(start <= a.len());
28 assert!(end <= a.len());
29 assert!(end >= start);
30 let mut startptr = a.as_ptr();
// SAFETY: `start <= a.len()` was asserted above, so `add(start)` is at most one-past-the-end.
31 startptr = unsafe { startptr.add(start) };
32 let len = end - start;
33 // The docs for from_raw_parts do not mention any requirements that the pointer be valid if the
34 // length is zero, aside from requiring proper alignment (which is met here). Thus,
35 // one-past-the-end should be an acceptable pointer for a 0-length slice.
36 unsafe { alloc::slice::from_raw_parts(startptr, len) }
39 /// Const version of `dest[dest_start..dest_end].copy_from_slice(source)`
41 /// Once `const_mut_refs` is stable we can convert this to a function
42 macro_rules! copy_from_slice {
43 ($dest: ident, $dest_start: expr, $dest_end: expr, $source: ident) => { {
// Evaluate the range bounds once, then validate them exactly like
// `copy_from_slice` would (panicking on mismatch) before the element-wise copy.
44 let dest_start = $dest_start;
45 let dest_end = $dest_end;
46 assert!(dest_start <= $dest.len());
47 assert!(dest_end <= $dest.len());
48 assert!(dest_end >= dest_start);
49 assert!(dest_end - dest_start == $source.len());
// NOTE(review): the `let mut i = 0;` initialization and the loop's increment/closing
// lines are not visible in this excerpt.
51 while i < $source.len() {
52 $dest[i + dest_start] = $source[i];
58 /// Const version of a > b
59 const fn slice_greater_than(a: &[u64], b: &[u64]) -> bool {
60 debug_assert!(a.len() == b.len());
// Use the shorter length so release builds (where the debug_assert is off) stay in bounds.
61 let len = if a.len() <= b.len() { a.len() } else { b.len() };
// First differing limb decides the comparison.
// NOTE(review): the loop header and the final return for the all-equal case are not
// visible in this excerpt, so the scan direction cannot be confirmed from here.
64 if a[i] > b[i] { return true; }
65 else if a[i] < b[i] { return false; }
71 /// Const version of a == b
72 const fn slice_equal(a: &[u64], b: &[u64]) -> bool {
73 debug_assert!(a.len() == b.len());
// Use the shorter length so release builds (where the debug_assert is off) stay in bounds.
74 let len = if a.len() <= b.len() { a.len() } else { b.len() };
// Any differing limb means inequality.
// NOTE(review): the loop header and final `true` return are not visible in this excerpt.
77 if a[i] != b[i] { return false; }
83 /// Adds a single u64 value in-place, returning an overflow flag, in which case one out-of-bounds
84 /// high bit is implicitly included in the result.
86 /// Once `const_mut_refs` is stable we can convert this to a function
87 macro_rules! add_u64 { ($a: ident, $b: expr) => { {
// Ripple the addend through the limbs, stopping early once there is nothing left to carry.
// NOTE(review): the loop setup (index initialization, `add` binding) and the final
// overflow-flag expression are not visible in this excerpt.
92 let (v, carry) = $a[i].overflowing_add(add);
95 if add == 0 { break; }
103 /// Negates the given u64 slice.
105 /// Once `const_mut_refs` is stable we can convert this to a function
106 macro_rules! negate { ($v: ident) => { {
// Two's-complement negation: flip every bit, then add one (the add's overflow is
// intentionally discarded — negating zero wraps back to zero).
109 $v[i] ^= 0xffff_ffff_ffff_ffff;
112 let _ = add_u64!($v, 1);
115 /// Doubles in-place, returning an overflow flag, in which case one out-of-bounds high bit is
116 /// implicitly included in the result.
118 /// Once `const_mut_refs` is stable we can convert this to a function
119 macro_rules! double { ($a: ident) => { {
120 { let _: &[u64] = &$a; } // Force type resolution
122 let mut carry = false;
// Shift each limb left by one, feeding the previous limb's top bit in as the carry.
// NOTE(review): the loop header/footer and final carry-out expression are not visible
// in this excerpt.
125 let next_carry = ($a[i] & (1 << 63)) != 0;
126 let (v, _next_carry_2) = ($a[i] << 1).overflowing_add(carry as u64);
128 debug_assert!(!_next_carry_2, "Adding one to 0x7ffff..*2 is only 0xffff..");
130 // Note that we can ignore _next_carry_2 here as we never need it - it cannot be set if
131 // next_carry is not set and at max 0xffff..*2 + 1 is only 0x1ffff.. (i.e. we can not need
// Generates a fixed-width limb-wise addition function with carry propagation.
142 macro_rules! define_add { ($name: ident, $len: expr) => {
143 /// Adds two $len-64-bit integers together, returning a new $len-64-bit integer and an overflow
144 /// bit, with the same semantics as the std [`u64::overflowing_add`] method.
145 const fn $name(a: &[u64], b: &[u64]) -> ([u64; $len], bool) {
146 debug_assert!(a.len() == $len);
147 debug_assert!(b.len() == $len);
148 let mut r = [0; $len];
149 let mut carry = false;
// Start at the highest index — limbs are stored most-significant-first, so the carry
// ripples from index $len-1 toward index 0.
150 let mut i = $len - 1;
// NOTE(review): the loop header/decrement and the final `(r, carry)` return are not
// visible in this excerpt.
152 let (v, mut new_carry) = a[i].overflowing_add(b[i]);
153 let (v2, new_new_carry) = v.overflowing_add(carry as u64);
154 new_carry |= new_new_carry;
// Instantiate fixed-width adders for every limb count used elsewhere in this module.
165 define_add!(add_2, 2);
166 define_add!(add_3, 3);
167 define_add!(add_4, 4);
168 define_add!(add_6, 6);
169 define_add!(add_8, 8);
170 define_add!(add_12, 12);
171 define_add!(add_16, 16);
172 define_add!(add_32, 32);
173 define_add!(add_64, 64);
174 define_add!(add_128, 128);
// Generates a fixed-width subtraction plus an absolute-difference variant built on it.
176 macro_rules! define_sub { ($name: ident, $name_abs: ident, $len: expr) => {
177 /// Subtracts the `b` $len-64-bit integer from the `a` $len-64-bit integer, returning a new
178 /// $len-64-bit integer and an overflow bit, with the same semantics as the std
179 /// [`u64::overflowing_sub`] method.
180 const fn $name(a: &[u64], b: &[u64]) -> ([u64; $len], bool) {
181 debug_assert!(a.len() == $len);
182 debug_assert!(b.len() == $len);
183 let mut r = [0; $len];
184 let mut carry = false;
// Borrow ripples from the least-significant limb (highest index) toward index 0.
185 let mut i = $len - 1;
// NOTE(review): the loop header/decrement and final return are not visible in this excerpt.
187 let (v, mut new_carry) = a[i].overflowing_sub(b[i]);
188 let (v2, new_new_carry) = v.overflowing_sub(carry as u64);
189 new_carry |= new_new_carry;
199 /// Subtracts the `b` $len-64-bit integer from the `a` $len-64-bit integer, returning a new
200 /// $len-64-bit integer representing the absolute value of the result, as well as a sign bit.
202 const fn $name_abs(a: &[u64], b: &[u64]) -> ([u64; $len], bool) {
// If the subtraction underflowed the raw result is a two's-complement negative;
// NOTE(review): the negate-on-underflow step and the return are not visible here.
203 let (mut res, neg) = $name(a, b);
// Instantiate fixed-width subtractors (and abs-diff variants) for every limb count used here.
211 define_sub!(sub_2, sub_abs_2, 2);
212 define_sub!(sub_3, sub_abs_3, 3);
213 define_sub!(sub_4, sub_abs_4, 4);
214 define_sub!(sub_6, sub_abs_6, 6);
215 define_sub!(sub_8, sub_abs_8, 8);
216 define_sub!(sub_12, sub_abs_12, 12);
217 define_sub!(sub_16, sub_abs_16, 16);
218 define_sub!(sub_32, sub_abs_32, 32);
219 define_sub!(sub_64, sub_abs_64, 64);
220 define_sub!(sub_128, sub_abs_128, 128);
222 /// Multiplies two 128-bit integers together, returning a new 256-bit integer.
224 /// This is the base case for our multiplication, taking advantage of Rust's native 128-bit int
225 /// types to do multiplication (potentially) natively.
226 const fn mul_2(a: &[u64], b: &[u64]) -> [u64; 4] {
227 debug_assert!(a.len() == 2);
228 debug_assert!(b.len() == 2);
230 // Gradeschool multiplication is way faster here.
231 let (a0, a1) = (a[0] as u128, a[1] as u128);
232 let (b0, b1) = (b[0] as u128, b[1] as u128);
// NOTE(review): the partial-product definitions (z2, z0, z1i, z1j) are not visible in
// this excerpt; only the cross-term combination and the final recombination remain.
236 let (z1, i_carry_a) = z1i.overflowing_add(z1j);
239 add_mul_2_parts(z2, z1, z0, i_carry_a)
242 /// Adds the gradeschool multiplication intermediate parts to a final 256-bit result
243 const fn add_mul_2_parts(z2: u128, z1: u128, z0: u128, i_carry_a: bool) -> [u64; 4] {
// Split each 128-bit intermediate into its high ("a") and low ("b") 64-bit halves.
244 let z2a = ((z2 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
245 let z1a = ((z1 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
246 let z0a = ((z0 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
247 let z2b = (z2 & 0xffff_ffff_ffff_ffff) as u64;
248 let z1b = (z1 & 0xffff_ffff_ffff_ffff) as u64;
249 let z0b = (z0 & 0xffff_ffff_ffff_ffff) as u64;
// Sum overlapping halves limb-by-limb, collecting carries into the top limb.
253 let (k, j_carry) = z0a.overflowing_add(z1b);
255 let (mut j, i_carry_b) = z1a.overflowing_add(z2b);
257 (j, i_carry_c) = j.overflowing_add(j_carry as u64);
259 let i_carry = i_carry_a as u64 + i_carry_b as u64 + i_carry_c as u64;
260 let (i, must_not_overflow) = z2a.overflowing_add(i_carry);
261 debug_assert!(!must_not_overflow, "Two 2*64 bit numbers, multiplied, will not use more than 4*64 bits");
// NOTE(review): the final `[i, j, k, l]` return (and the `l` binding) are not visible
// in this excerpt.
// Multiplies two 3-limb (192-bit) integers via schoolbook multiplication on u128
// partial products, returning a 6-limb (384-bit) result.
266 const fn mul_3(a: &[u64], b: &[u64]) -> [u64; 6] {
267 debug_assert!(a.len() == 3);
268 debug_assert!(b.len() == 3);
270 let (a0, a1, a2) = (a[0] as u128, a[1] as u128, a[2] as u128);
271 let (b0, b1, b2) = (b[0] as u128, b[1] as u128, b[2] as u128);
// NOTE(review): the u128 partial-product definitions (m0 through m4, one group per
// result column) are not visible in this excerpt. Below, each partial product is split
// into 64-bit halves named r<column><letter>, then the columns are summed with carries.
283 let r5 = ((m4 >> 0) & 0xffff_ffff_ffff_ffff) as u64;
285 let r4a = ((m4 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
286 let r4b = ((m3a >> 0) & 0xffff_ffff_ffff_ffff) as u64;
287 let r4c = ((m3b >> 0) & 0xffff_ffff_ffff_ffff) as u64;
289 let r3a = ((m3a >> 64) & 0xffff_ffff_ffff_ffff) as u64;
290 let r3b = ((m3b >> 64) & 0xffff_ffff_ffff_ffff) as u64;
291 let r3c = ((m2a >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
292 let r3d = ((m2b >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
293 let r3e = ((m2c >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
295 let r2a = ((m2a >> 64) & 0xffff_ffff_ffff_ffff) as u64;
296 let r2b = ((m2b >> 64) & 0xffff_ffff_ffff_ffff) as u64;
297 let r2c = ((m2c >> 64) & 0xffff_ffff_ffff_ffff) as u64;
298 let r2d = ((m1a >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
299 let r2e = ((m1b >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
301 let r1a = ((m1a >> 64) & 0xffff_ffff_ffff_ffff) as u64;
302 let r1b = ((m1b >> 64) & 0xffff_ffff_ffff_ffff) as u64;
303 let r1c = ((m0 >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
305 let r0a = ((m0 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
// Column 4: carries feed into column 3.
307 let (r4, r3_ca) = r4a.overflowing_add(r4b);
308 let (r4, r3_cb) = r4.overflowing_add(r4c);
309 let r3_c = r3_ca as u64 + r3_cb as u64;
// Column 3: five contributions plus the carries from column 4.
311 let (r3, r2_ca) = r3a.overflowing_add(r3b);
312 let (r3, r2_cb) = r3.overflowing_add(r3c);
313 let (r3, r2_cc) = r3.overflowing_add(r3d);
314 let (r3, r2_cd) = r3.overflowing_add(r3e);
315 let (r3, r2_ce) = r3.overflowing_add(r3_c);
316 let r2_c = r2_ca as u64 + r2_cb as u64 + r2_cc as u64 + r2_cd as u64 + r2_ce as u64;
// Column 2.
318 let (r2, r1_ca) = r2a.overflowing_add(r2b);
319 let (r2, r1_cb) = r2.overflowing_add(r2c);
320 let (r2, r1_cc) = r2.overflowing_add(r2d);
321 let (r2, r1_cd) = r2.overflowing_add(r2e);
322 let (r2, r1_ce) = r2.overflowing_add(r2_c);
323 let r1_c = r1_ca as u64 + r1_cb as u64 + r1_cc as u64 + r1_cd as u64 + r1_ce as u64;
// Column 1.
325 let (r1, r0_ca) = r1a.overflowing_add(r1b);
326 let (r1, r0_cb) = r1.overflowing_add(r1c);
327 let (r1, r0_cc) = r1.overflowing_add(r1_c);
328 let r0_c = r0_ca as u64 + r0_cb as u64 + r0_cc as u64;
// Top column: must absorb all remaining carries without overflowing.
330 let (r0, must_not_overflow) = r0a.overflowing_add(r0_c);
331 debug_assert!(!must_not_overflow, "Two 3*64 bit numbers, multiplied, will not use more than 6*64 bits");
333 [r0, r1, r2, r3, r4, r5]
// Generates a Karatsuba multiplier of width $len limbs from half-width primitives:
// $submul/$subsqr-width multiply, full- and half-width add/sub, and half-width abs-diff.
336 macro_rules! define_mul { ($name: ident, $len: expr, $submul: ident, $add: ident, $subadd: ident, $sub: ident, $subsub: ident) => {
337 /// Multiplies two $len-64-bit integers together, returning a new $len*2-64-bit integer.
338 const fn $name(a: &[u64], b: &[u64]) -> [u64; $len * 2] {
339 // We could probably get a bit faster doing gradeschool multiplication for some smaller
340 // sizes, but it's easier to just have one variable-length multiplication, so we do
341 // Karatsuba always here.
342 debug_assert!(a.len() == $len);
343 debug_assert!(b.len() == $len);
// Split each operand into high (index 0..) and low halves.
345 let a0 = const_subslice(a, 0, $len / 2);
346 let a1 = const_subslice(a, $len / 2, $len);
347 let b0 = const_subslice(b, 0, $len / 2);
348 let b1 = const_subslice(b, $len / 2, $len);
350 let z2 = $submul(a0, b0);
351 let z0 = $submul(a1, b1);
// Karatsuba middle term: multiply absolute differences, tracking the sign separately so
// all sub-multiplications stay unsigned.
353 let (z1a_max, z1a_min, z1a_sign) =
354 if slice_greater_than(&a1, &a0) { (a1, a0, true) } else { (a0, a1, false) };
355 let (z1b_max, z1b_min, z1b_sign) =
356 if slice_greater_than(&b1, &b0) { (b1, b0, true) } else { (b0, b1, false) };
358 let z1a = $subsub(z1a_max, z1a_min);
359 debug_assert!(!z1a.1, "z1a_max was selected to be greater than z1a_min");
360 let z1b = $subsub(z1b_max, z1b_min);
361 debug_assert!(!z1b.1, "z1b_max was selected to be greater than z1b_min");
362 let z1m_sign = z1a_sign == z1b_sign;
364 let z1m = $submul(&z1a.0, &z1b.0);
365 let z1n = $add(&z0, &z2);
366 let mut z1_carry = z1n.1;
367 let z1 = if z1m_sign {
368 let r = $sub(&z1n.0, &z1m);
369 if r.1 { z1_carry ^= true; }
// NOTE(review): the end of this branch, the `else` header, and the close of the
// conditional are not visible in this excerpt.
372 let r = $add(&z1n.0, &z1m);
373 if r.1 { z1_carry = true; }
// Recombine: result quarters l (lowest) through i (highest), propagating carries upward.
377 let l = const_subslice(&z0, $len / 2, $len);
378 let (k, j_carry) = $subadd(const_subslice(&z0, 0, $len / 2), const_subslice(&z1, $len / 2, $len));
379 let (mut j, i_carry_a) = $subadd(const_subslice(&z1, 0, $len / 2), const_subslice(&z2, $len / 2, $len));
380 let mut i_carry_b = false;
// NOTE(review): the `if j_carry`-style guard around this carry-in is not visible here.
382 i_carry_b = add_u64!(j, 1);
384 let mut i = [0; $len / 2];
385 let i_source = const_subslice(&z2, 0, $len / 2);
386 copy_from_slice!(i, 0, $len / 2, i_source);
387 let i_carry = i_carry_a as u64 + i_carry_b as u64 + z1_carry as u64;
389 let must_not_overflow = add_u64!(i, i_carry);
390 debug_assert!(!must_not_overflow, "Two N*64 bit numbers, multiplied, will not use more than 2*N*64 bits");
// Assemble the four quarters, most-significant first.
393 let mut res = [0; $len * 2];
394 copy_from_slice!(res, $len * 2 * 0 / 4, $len * 2 * 1 / 4, i);
395 copy_from_slice!(res, $len * 2 * 1 / 4, $len * 2 * 2 / 4, j);
396 copy_from_slice!(res, $len * 2 * 2 / 4, $len * 2 * 3 / 4, k);
397 copy_from_slice!(res, $len * 2 * 3 / 4, $len * 2 * 4 / 4, l);
// Build up multipliers recursively: each width uses the half-width multiplier as its base.
402 define_mul!(mul_4, 4, mul_2, add_4, add_2, sub_4, sub_2);
403 define_mul!(mul_6, 6, mul_3, add_6, add_3, sub_6, sub_3);
404 define_mul!(mul_8, 8, mul_4, add_8, add_4, sub_8, sub_4);
405 define_mul!(mul_16, 16, mul_8, add_16, add_8, sub_16, sub_8);
406 define_mul!(mul_32, 32, mul_16, add_32, add_16, sub_32, sub_16);
407 define_mul!(mul_64, 64, mul_32, add_64, add_32, sub_64, sub_32);
410 /// Squares a 128-bit integer, returning a new 256-bit integer.
412 /// This is the base case for our squaring, taking advantage of Rust's native 128-bit int
413 /// types to do multiplication (potentially) natively.
414 const fn sqr_2(a: &[u64]) -> [u64; 4] {
415 debug_assert!(a.len() == 2);
417 let (a0, a1) = (a[0] as u128, a[1] as u128);
// The cross term appears twice in a square, so z1 is doubled; capture its top bit first
// since the doubling shifts it out.
419 let mut z1 = a0 * a1;
420 let i_carry_a = z1 & (1u128 << 127) != 0;
// NOTE(review): the z0/z2 squares and the `z1 <<= 1` doubling line are not visible in
// this excerpt.
424 add_mul_2_parts(z2, z1, z0, i_carry_a)
// Generates a fixed-width squaring function from half-width multiply/square/add primitives.
427 macro_rules! define_sqr { ($name: ident, $len: expr, $submul: ident, $subsqr: ident, $subadd: ident) => {
428 /// Squares a $len-64-bit integer, returning a new $len*2-64-bit integer.
429 const fn $name(a: &[u64]) -> [u64; $len * 2] {
430 // Squaring is only 3 half-length multiplies/squares in gradeschool math, so use that.
431 debug_assert!(a.len() == $len);
433 let hi = const_subslice(a, 0, $len / 2);
434 let lo = const_subslice(a, $len / 2, $len);
// (hi*R + lo)^2 = hi^2*R^2 + 2*hi*lo*R + lo^2; the cross term is doubled in place.
436 let v0 = $subsqr(lo);
437 let mut v1 = $submul(hi, lo);
438 let i_carry_a = double!(v1);
439 let v2 = $subsqr(hi);
// Recombine quarters l (lowest) through i (highest), carrying upward.
441 let l = const_subslice(&v0, $len / 2, $len);
442 let (k, j_carry) = $subadd(const_subslice(&v0, 0, $len / 2), const_subslice(&v1, $len / 2, $len));
443 let (mut j, i_carry_b) = $subadd(const_subslice(&v1, 0, $len / 2), const_subslice(&v2, $len / 2, $len));
445 let mut i = [0; $len / 2];
446 let i_source = const_subslice(&v2, 0, $len / 2);
447 copy_from_slice!(i, 0, $len / 2, i_source);
449 let mut i_carry_c = false;
// NOTE(review): the `if j_carry`-style guard around this carry-in is not visible here.
451 i_carry_c = add_u64!(j, 1);
453 let i_carry = i_carry_a as u64 + i_carry_b as u64 + i_carry_c as u64;
455 let must_not_overflow = add_u64!(i, i_carry);
456 debug_assert!(!must_not_overflow, "Two N*64 bit numbers, multiplied, will not use more than 2*N*64 bits");
// Assemble the four quarters, most-significant first.
459 let mut res = [0; $len * 2];
460 copy_from_slice!(res, $len * 2 * 0 / 4, $len * 2 * 1 / 4, i);
461 copy_from_slice!(res, $len * 2 * 1 / 4, $len * 2 * 2 / 4, j);
462 copy_from_slice!(res, $len * 2 * 2 / 4, $len * 2 * 3 / 4, k);
463 copy_from_slice!(res, $len * 2 * 3 / 4, $len * 2 * 4 / 4, l);
468 // TODO: Write an optimized sqr_3 (though secp384r1 is barely used)
// Squares a 3-limb integer by delegating to the generic 3-limb multiply.
469 const fn sqr_3(a: &[u64]) -> [u64; 6] { mul_3(a, a) }
// Build up squaring functions recursively from the half-width multiply/square pairs.
471 define_sqr!(sqr_4, 4, mul_2, sqr_2, add_2);
472 define_sqr!(sqr_6, 6, mul_3, sqr_3, add_3);
473 define_sqr!(sqr_8, 8, mul_4, sqr_4, add_4);
474 define_sqr!(sqr_16, 16, mul_8, sqr_8, add_8);
475 define_sqr!(sqr_32, 32, mul_16, sqr_16, add_16);
476 define_sqr!(sqr_64, 64, mul_32, sqr_32, add_32);
// Storage-abstraction hooks for define_div_rem: a fixed array needs no preparation before
// writing the next element, while a Vec must first push a zeroed slot.
478 macro_rules! dummy_pre_push { ($name: ident, $len: expr) => {} }
479 macro_rules! vec_pre_push { ($name: ident, $len: expr) => { $name.push([0; $len]); } }
// Generates a naive shift-and-subtract division for $len-limb integers. `$heap_init`
// supplies the pow2s storage (array or Vec) and `$pre_push` its grow hook; `$const_opt`
// optionally marks the function `const` (Vec-backed variants cannot be const).
481 macro_rules! define_div_rem { ($name: ident, $len: expr, $sub: ident, $heap_init: expr, $pre_push: ident $(, $const_opt: tt)?) => {
482 /// Divides two $len-64-bit integers, `a` by `b`, returning the quotient and remainder
484 /// Fails iff `b` is zero.
485 $($const_opt)? fn $name(a: &[u64; $len], b: &[u64; $len]) -> Result<([u64; $len], [u64; $len]), ()> {
486 if slice_equal(b, &[0; $len]) { return Err(()); }
488 // Very naively divide `a` by `b` by calculating all the powers of two times `b` up to `a`,
489 // then subtracting the powers of two in decreasing order. What's left is the remainder.
491 // This requires storing all the multiples of `b` in `pow2s`, which may be a vec or an
492 // array. `$pre_push!()` sets up the next element with zeros and then we can overwrite it.
494 let mut pow2s = $heap_init;
495 let mut pow2s_count = 0;
// NOTE(review): the `b_pow` initialization (a copy of `b`) is not visible in this excerpt.
496 while slice_greater_than(a, &b_pow) {
497 $pre_push!(pow2s, $len);
498 pow2s[pow2s_count] = b_pow;
// Stop once doubling would overflow $len limbs — b_pow already exceeds any possible rem.
500 let double_overflow = double!(b_pow);
501 if double_overflow { break; }
503 let mut quot = [0; $len];
// Walk the stored powers from largest to smallest, shifting a 1 into the quotient
// whenever the current power fits into the remainder.
505 let mut pow2 = pow2s_count as isize - 1;
// NOTE(review): the loop header, `rem` initialization, and index decrement are not
// visible in this excerpt.
507 let b_pow = pow2s[pow2 as usize];
508 let overflow = double!(quot);
509 debug_assert!(!overflow, "quotient should be expressible in $len*64 bits");
510 if slice_greater_than(&rem, &b_pow) {
511 let (r, underflow) = $sub(&rem, &b_pow);
512 debug_assert!(!underflow, "rem was just checked to be > b_pow, so sub cannot underflow");
// Exact-division fixup: `slice_greater_than` is strict, so a remainder equal to `b`
// still needs one final subtraction (quotient +1, remainder zero).
518 if slice_equal(&rem, b) {
519 let overflow = add_u64!(quot, 1);
520 debug_assert!(!overflow, "quotient should be expressible in $len*64 bits");
521 Ok((quot, [0; $len]))
// Small widths use stack arrays (const-capable); large widths use Vec-backed storage.
// The debug_assertions-gated variants exist only to support debug-mode checks elsewhere.
529 define_div_rem!(div_rem_2, 2, sub_2, [[0; 2]; 2 * 64], dummy_pre_push, const);
530 define_div_rem!(div_rem_4, 4, sub_4, [[0; 4]; 4 * 64], dummy_pre_push, const); // Uses 8 KiB of stack
531 define_div_rem!(div_rem_6, 6, sub_6, [[0; 6]; 6 * 64], dummy_pre_push, const); // Uses 18 KiB of stack!
532 #[cfg(debug_assertions)]
533 define_div_rem!(div_rem_8, 8, sub_8, [[0; 8]; 8 * 64], dummy_pre_push, const); // Uses 32 KiB of stack!
534 #[cfg(debug_assertions)]
535 define_div_rem!(div_rem_12, 12, sub_12, [[0; 12]; 12 * 64], dummy_pre_push, const); // Uses 72 KiB of stack!
536 define_div_rem!(div_rem_64, 64, sub_64, Vec::new(), vec_pre_push); // Uses up to 2 MiB of heap
537 #[cfg(debug_assertions)]
538 define_div_rem!(div_rem_128, 128, sub_128, Vec::new(), vec_pre_push); // Uses up to 8 MiB of heap
// Generates a modular-inverse function using the extended Euclidean algorithm, with the
// Bézout coefficient's sign tracked out-of-band so all arithmetic stays unsigned.
540 macro_rules! define_mod_inv { ($name: ident, $len: expr, $div: ident, $add: ident, $sub_abs: ident, $mul: ident) => {
541 /// Calculates the modular inverse of a $len-64-bit number with respect to the given modulus,
543 const fn $name(a: &[u64; $len], m: &[u64; $len]) -> Result<[u64; $len], ()> {
544 if slice_equal(a, &[0; $len]) || slice_equal(m, &[0; $len]) { return Err(()); }
// Extended-Euclid state; NOTE(review): the r/old_r initialization lines are not visible
// in this excerpt.
546 let (mut s, mut old_s) = ([0; $len], [0; $len]);
551 let (mut old_s_neg, mut s_neg) = (false, false);
553 while !slice_equal(&r, &[0; $len]) {
554 let (quot, new_r) = debug_unwrap!($div(&old_r, &r));
// NOTE(review): this line appears garbled in this excerpt — the first argument of the
// multiplication (presumably `&quot`) has been mangled into a stray string; verify
// against the canonical source before building.
556 let new_sa = $mul(", &s);
557 debug_assert!(slice_equal(const_subslice(&new_sa, 0, $len), &[0; $len]), "S overflowed");
// new_s = old_s - quot*s, with signs handled case-by-case on (old_s_neg, s_neg).
// NOTE(review): the match-arm patterns and several closing lines are missing from this
// excerpt; only one arm's pattern (false, false) is visible.
558 let (new_s, new_s_neg) = match (old_s_neg, s_neg) {
560 let (new_s, overflow) = $add(&old_s, const_subslice(&new_sa, $len, new_sa.len()));
561 debug_assert!(!overflow);
565 let (new_s, overflow) = $add(&old_s, const_subslice(&new_sa, $len, new_sa.len()));
566 debug_assert!(!overflow);
570 let (new_s, overflow) = $add(&old_s, const_subslice(&new_sa, $len, new_sa.len()));
571 debug_assert!(!overflow);
574 (false, false) => $sub_abs(&old_s, const_subslice(&new_sa, $len, new_sa.len())),
586 // At this point old_r contains our GCD and old_s our first Bézout's identity coefficient.
// gcd != 1 means `a` has no inverse mod `m`; NOTE(review): the Err return in this branch
// is not visible in this excerpt.
587 if !slice_equal(const_subslice(&old_r, 0, $len - 1), &[0; $len - 1]) || old_r[$len - 1] != 1 {
590 debug_assert!(slice_greater_than(m, &old_s));
// When the coefficient came out negative, the inverse is m - |old_s|.
592 let (modinv, underflow) = $sub_abs(m, &old_s);
593 debug_assert!(!underflow);
594 debug_assert!(slice_greater_than(m, &modinv));
// Instantiate modular inverses for the widths used by the curve and RSA code.
603 define_mod_inv!(mod_inv_2, 2, div_rem_2, add_2, sub_abs_2, mul_2);
604 define_mod_inv!(mod_inv_4, 4, div_rem_4, add_4, sub_abs_4, mul_4);
605 define_mod_inv!(mod_inv_6, 6, div_rem_6, add_6, sub_abs_6, mul_6);
607 define_mod_inv!(mod_inv_8, 8, div_rem_8, add_8, sub_abs_8, mul_8);
609 // ******************
610 // * The public API *
611 // ******************
// Limb counts (64-bit words) for each fixed integer width used below.
613 const WORD_COUNT_4096: usize = 4096 / 64;
614 const WORD_COUNT_256: usize = 256 / 64;
615 const WORD_COUNT_384: usize = 384 / 64;
617 // RFC 5702 indicates RSA keys can be up to 4096 bits, so we always use 4096-bit integers
// Fixed-width unsigned integers as limb arrays. Derived Ord/PartialOrd compare arrays
// lexicographically, which matches numeric order for the most-significant-first limb
// layout used by the from_be_bytes constructors.
618 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
619 pub(super) struct U4096([u64; WORD_COUNT_4096]);
621 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
622 pub(super) struct U256([u64; WORD_COUNT_256]);
624 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
625 pub(super) struct U384([u64; WORD_COUNT_384]);
// Common interface over the fixed-width integer types above.
// NOTE(review): associated items between the header and these methods (e.g. the ZERO and
// BYTES constants implemented below) are not visible in this excerpt.
627 pub(super) trait Int: Clone + Ord + Sized {
630 fn from_be_bytes(b: &[u8]) -> Result<Self, ()>;
631 fn limbs(&self) -> &[u64];
// Trait items for `impl Int for U256` and `impl Int for U384`.
// NOTE(review): the `impl … for …` headers and closing braces are not visible in this
// excerpt. Each from_be_bytes delegates to the type's inherent constructor; limbs exposes
// the raw limb array.
634 const ZERO: U256 = U256([0; 4]);
635 const BYTES: usize = 32;
636 fn from_be_bytes(b: &[u8]) -> Result<Self, ()> { Self::from_be_bytes(b) }
637 fn limbs(&self) -> &[u64] { &self.0 }
640 const ZERO: U384 = U384([0; 6]);
641 const BYTES: usize = 48;
642 fn from_be_bytes(b: &[u8]) -> Result<Self, ()> { Self::from_be_bytes(b) }
643 fn limbs(&self) -> &[u64] { &self.0 }
646 /// Defines a *PRIME* Modulus
// Precomputed Montgomery constants for the modulus.
// NOTE(review): one associated item between the header and these constants (presumably
// the prime itself) is not visible in this excerpt.
647 pub(super) trait PrimeModulus<I: Int> {
649 const R_SQUARED_MOD_PRIME: I;
650 const NEGATIVE_PRIME_INV_MOD_R: I;
// Montgomery-form residues modulo the prime `M`; PhantomData ties the modulus type to the
// value without storing it.
653 #[derive(Clone, Debug, PartialEq, Eq)] // Ord doesn't make sense cause we have an R factor
654 pub(super) struct U256Mod<M: PrimeModulus<U256>>(U256, PhantomData<M>);
656 #[derive(Clone, Debug, PartialEq, Eq)] // Ord doesn't make sense cause we have an R factor
657 pub(super) struct U384Mod<M: PrimeModulus<U384>>(U384, PhantomData<M>);
660 /// Constructs a new [`U4096`] from a variable number of big-endian bytes.
// NOTE(review): the enclosing `impl U4096` header is not visible in this excerpt.
661 pub(super) fn from_be_bytes(bytes: &[u8]) -> Result<U4096, ()> {
662 if bytes.len() > 4096/8 { return Err(()); }
// Number of limbs the input occupies, rounding a partial leading limb up.
663 let u64s = (bytes.len() + 7) / 8;
664 let mut res = [0; WORD_COUNT_4096];
// Per-limb: compute the input byte range for limb i (the leading limb may be short, hence
// the saturating_sub and the right-aligned copy into the 8-byte buffer `b`), then store
// it right-aligned in the limb array so the value is most-significant-limb-first.
// NOTE(review): the loop header, the `b` buffer initialization, and the final
// `Ok(U4096(res))` are not visible in this excerpt.
667 let pos = (u64s - i) * 8;
668 let start = bytes.len().saturating_sub(pos);
669 let end = bytes.len() + 8 - pos;
670 b[8 + start - end..].copy_from_slice(&bytes[start..end]);
671 res[i + WORD_COUNT_4096 - u64s] = u64::from_be_bytes(b);
676 /// Naively multiplies `self` * `b` mod `m`, returning a new [`U4096`].
678 /// Fails iff m is 0 or self or b are greater than m.
// Debug-only reference implementation used to cross-check the Montgomery math below.
679 #[cfg(debug_assertions)]
680 fn mulmod_naive(&self, b: &U4096, m: &U4096) -> Result<U4096, ()> {
681 if m.0 == [0; WORD_COUNT_4096] { return Err(()); }
682 if self > m || b > m { return Err(()); }
// Full 8192-bit product, then reduce with the (slow) 128-limb division; m is widened to
// 128 limbs by zero-filling its high half.
684 let mul = mul_64(&self.0, &b.0);
686 let mut m_zeros = [0; 128];
687 m_zeros[WORD_COUNT_4096..].copy_from_slice(&m.0);
688 let (_, rem) = div_rem_128(&mul, &m_zeros)?;
689 let mut res = [0; WORD_COUNT_4096];
// The remainder of a mod-m reduction fits in m's width, so the high half must be zero.
690 debug_assert_eq!(&rem[..WORD_COUNT_4096], &[0; WORD_COUNT_4096]);
691 res.copy_from_slice(&rem[WORD_COUNT_4096..]);
// NOTE(review): the final `Ok(U4096(res))` is not visible in this excerpt.
695 /// Calculates `self` ^ `exp` mod `m`, returning a new [`U4096`].
697 /// Fails iff m is 0, even, or `self` is greater than `m`.
698 pub(super) fn expmod_odd_mod(&self, mut exp: u32, m: &U4096) -> Result<U4096, ()> {
699 #![allow(non_camel_case_types)]
// NOTE(review): many interior lines of this function are missing from this excerpt
// (several branch bodies, loop headers, and closing braces); treat the flow comments
// below as describing only the visible lines.
701 if m.0 == [0; WORD_COUNT_4096] { return Err(()); }
702 if m.0[WORD_COUNT_4096 - 1] & 1 == 0 { return Err(()); }
703 if self > m { return Err(()); }
// t starts as the running total; trivially handle m == 1 and exp == 0 up front.
705 let mut t = [0; WORD_COUNT_4096];
706 if m.0[..WORD_COUNT_4096 - 1] == [0; WORD_COUNT_4096 - 1] && m.0[WORD_COUNT_4096 - 1] == 1 {
709 t[WORD_COUNT_4096 - 1] = 1;
710 if exp == 0 { return Ok(U4096(t)); }
712 // Because m is not even, using 2^4096 as the Montgomery R value is always safe - it is
713 // guaranteed to be co-prime with any non-even integer.
715 // We use a single 4096-bit integer type for all our RSA operations, though in most cases
716 // we're actually dealing with 1024-bit or 2048-bit ints. Thus, we define sub-array math
717 // here which debug_assert's the required bits are 0s and then uses faster math primitives.
719 type mul_ty = fn(&[u64], &[u64]) -> [u64; WORD_COUNT_4096 * 2];
720 type sqr_ty = fn(&[u64]) -> [u64; WORD_COUNT_4096 * 2];
721 type add_double_ty = fn(&[u64], &[u64]) -> ([u64; WORD_COUNT_4096 * 2], bool);
722 type sub_ty = fn(&[u64], &[u64]) -> ([u64; WORD_COUNT_4096], bool);
// Pick the narrowest primitive set the modulus actually needs (1024/2048/4096 bits),
// based on how many high limbs of m are zero.
723 let (word_count, log_bits, mul, sqr, add_double, sub) =
724 if m.0[..WORD_COUNT_4096 / 2] == [0; WORD_COUNT_4096 / 2] {
725 if m.0[..WORD_COUNT_4096 * 3 / 4] == [0; WORD_COUNT_4096 * 3 / 4] {
726 fn mul_16_subarr(a: &[u64], b: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
727 debug_assert_eq!(a.len(), WORD_COUNT_4096);
728 debug_assert_eq!(b.len(), WORD_COUNT_4096);
729 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
730 debug_assert_eq!(&b[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
731 let mut res = [0; WORD_COUNT_4096 * 2];
732 res[WORD_COUNT_4096 + WORD_COUNT_4096 / 2..].copy_from_slice(
733 &mul_16(&a[WORD_COUNT_4096 * 3 / 4..], &b[WORD_COUNT_4096 * 3 / 4..]));
736 fn sqr_16_subarr(a: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
737 debug_assert_eq!(a.len(), WORD_COUNT_4096);
738 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
739 let mut res = [0; WORD_COUNT_4096 * 2];
740 res[WORD_COUNT_4096 + WORD_COUNT_4096 / 2..].copy_from_slice(
741 &sqr_16(&a[WORD_COUNT_4096 * 3 / 4..]));
744 fn add_32_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096 * 2], bool) {
745 debug_assert_eq!(a.len(), WORD_COUNT_4096 * 2);
746 debug_assert_eq!(b.len(), WORD_COUNT_4096 * 2);
747 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 2], &[0; WORD_COUNT_4096 * 3 / 2]);
748 debug_assert_eq!(&b[..WORD_COUNT_4096 * 3 / 2], &[0; WORD_COUNT_4096 * 3 / 2]);
749 let (add, overflow) = add_32(&a[WORD_COUNT_4096 * 3 / 2..], &b[WORD_COUNT_4096 * 3 / 2..]);
750 let mut res = [0; WORD_COUNT_4096 * 2];
751 res[WORD_COUNT_4096 * 3 / 2..].copy_from_slice(&add);
754 fn sub_16_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096], bool) {
755 debug_assert_eq!(a.len(), WORD_COUNT_4096);
756 debug_assert_eq!(b.len(), WORD_COUNT_4096);
757 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
758 debug_assert_eq!(&b[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
759 let (sub, underflow) = sub_16(&a[WORD_COUNT_4096 * 3 / 4..], &b[WORD_COUNT_4096 * 3 / 4..]);
760 let mut res = [0; WORD_COUNT_4096];
761 res[WORD_COUNT_4096 * 3 / 4..].copy_from_slice(&sub);
764 (16, 10, mul_16_subarr as mul_ty, sqr_16_subarr as sqr_ty, add_32_subarr as add_double_ty, sub_16_subarr as sub_ty)
766 fn mul_32_subarr(a: &[u64], b: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
767 debug_assert_eq!(a.len(), WORD_COUNT_4096);
768 debug_assert_eq!(b.len(), WORD_COUNT_4096);
769 debug_assert_eq!(&a[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
770 debug_assert_eq!(&b[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
771 let mut res = [0; WORD_COUNT_4096 * 2];
772 res[WORD_COUNT_4096..].copy_from_slice(
773 &mul_32(&a[WORD_COUNT_4096 / 2..], &b[WORD_COUNT_4096 / 2..]));
776 fn sqr_32_subarr(a: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
777 debug_assert_eq!(a.len(), WORD_COUNT_4096);
778 debug_assert_eq!(&a[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
779 let mut res = [0; WORD_COUNT_4096 * 2];
780 res[WORD_COUNT_4096..].copy_from_slice(
781 &sqr_32(&a[WORD_COUNT_4096 / 2..]));
784 fn add_64_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096 * 2], bool) {
785 debug_assert_eq!(a.len(), WORD_COUNT_4096 * 2);
786 debug_assert_eq!(b.len(), WORD_COUNT_4096 * 2);
787 debug_assert_eq!(&a[..WORD_COUNT_4096], &[0; WORD_COUNT_4096]);
788 debug_assert_eq!(&b[..WORD_COUNT_4096], &[0; WORD_COUNT_4096]);
789 let (add, overflow) = add_64(&a[WORD_COUNT_4096..], &b[WORD_COUNT_4096..]);
790 let mut res = [0; WORD_COUNT_4096 * 2];
791 res[WORD_COUNT_4096..].copy_from_slice(&add);
794 fn sub_32_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096], bool) {
795 debug_assert_eq!(a.len(), WORD_COUNT_4096);
796 debug_assert_eq!(b.len(), WORD_COUNT_4096);
797 debug_assert_eq!(&a[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
798 debug_assert_eq!(&b[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
799 let (sub, underflow) = sub_32(&a[WORD_COUNT_4096 / 2..], &b[WORD_COUNT_4096 / 2..]);
800 let mut res = [0; WORD_COUNT_4096];
801 res[WORD_COUNT_4096 / 2..].copy_from_slice(&sub);
804 (32, 11, mul_32_subarr as mul_ty, sqr_32_subarr as sqr_ty, add_64_subarr as add_double_ty, sub_32_subarr as sub_ty)
807 (64, 12, mul_64 as mul_ty, sqr_64 as sqr_ty, add_128 as add_double_ty, sub_64 as sub_ty)
810 // r is always the even value with one bit set above the word count we're using.
811 let mut r = [0; WORD_COUNT_4096 * 2];
812 r[WORD_COUNT_4096 * 2 - word_count - 1] = 1;
// Compute -m^-1 mod R by Newton–Hensel iteration: each pass doubles the number of
// correct low bits, so log_bits passes suffice for a word_count*64-bit inverse.
814 let mut m_inv_pos = [0; WORD_COUNT_4096];
815 m_inv_pos[WORD_COUNT_4096 - 1] = 1;
816 let mut two = [0; WORD_COUNT_4096];
817 two[WORD_COUNT_4096 - 1] = 2;
818 for _ in 0..log_bits {
819 let mut m_m_inv = mul(&m_inv_pos, &m.0);
820 m_m_inv[..WORD_COUNT_4096 * 2 - word_count].fill(0);
821 let m_inv = mul(&sub(&two, &m_m_inv[WORD_COUNT_4096..]).0, &m_inv_pos);
822 m_inv_pos[WORD_COUNT_4096 - word_count..].copy_from_slice(&m_inv[WORD_COUNT_4096 * 2 - word_count..]);
824 m_inv_pos[..WORD_COUNT_4096 - word_count].fill(0);
826 // `m_inv` is the negative modular inverse of m mod R, so subtract m_inv from R.
827 let mut m_inv = m_inv_pos;
// NOTE(review): the negation step referenced by the comment above is not visible in
// this excerpt.
829 m_inv[..WORD_COUNT_4096 - word_count].fill(0);
830 debug_assert_eq!(&mul(&m_inv, &m.0)[WORD_COUNT_4096 * 2 - word_count..],
832 &[0xffff_ffff_ffff_ffff; WORD_COUNT_4096][WORD_COUNT_4096 - word_count..]);
834 let mont_reduction = |mu: [u64; WORD_COUNT_4096 * 2]| -> [u64; WORD_COUNT_4096] {
835 debug_assert_eq!(&mu[..WORD_COUNT_4096 * 2 - word_count * 2],
836 &[0; WORD_COUNT_4096 * 2][..WORD_COUNT_4096 * 2 - word_count * 2]);
837 // Do a montgomery reduction of `mu`
839 // mu % R is just the bottom word_count bytes of mu
840 let mut mu_mod_r = [0; WORD_COUNT_4096];
841 mu_mod_r[WORD_COUNT_4096 - word_count..].copy_from_slice(&mu[WORD_COUNT_4096 * 2 - word_count..]);
843 // v = ((mu % R) * negative_modulus_inverse) % R
844 let mut v = mul(&mu_mod_r, &m_inv);
845 v[..WORD_COUNT_4096 * 2 - word_count].fill(0); // mod R
847 // t_on_r = (mu + v*modulus) / R
848 let t0 = mul(&v[WORD_COUNT_4096..], &m.0);
849 let (t1, t1_extra_bit) = add_double(&t0, &mu);
851 // Note that dividing t1 by R is simply a matter of shifting right by word_count bytes
852 // We only need to maintain word_count bytes (plus `t1_extra_bit` which is implicitly
853 // an extra bit) because t_on_r is guaranteed to be, at max, 2*m - 1.
854 let mut t1_on_r = [0; WORD_COUNT_4096];
855 debug_assert_eq!(&t1[WORD_COUNT_4096 * 2 - word_count..], &[0; WORD_COUNT_4096][WORD_COUNT_4096 - word_count..],
856 "t1 should be divisible by r");
857 t1_on_r[WORD_COUNT_4096 - word_count..].copy_from_slice(&t1[WORD_COUNT_4096 * 2 - word_count * 2..WORD_COUNT_4096 * 2 - word_count]);
859 // The modulus has only word_count bytes, so if t1_extra_bit is set we are definitely
860 // larger than the modulus.
861 if t1_extra_bit || t1_on_r >= m.0 {
863 (t1_on_r, underflow) = sub(&t1_on_r, &m.0);
864 debug_assert_eq!(t1_extra_bit, underflow,
865 "The number (t1_extra_bit, t1_on_r) is at most 2m-1, so underflowing t1_on_r - m should happen iff t1_extra_bit is set.");
870 // Calculate R^2 mod m as ((2^DOUBLES * R) mod m)^(log_bits - LOG2_DOUBLES) mod R
871 let mut r_minus_one = [0xffff_ffff_ffff_ffffu64; WORD_COUNT_4096];
872 r_minus_one[..WORD_COUNT_4096 - word_count].fill(0);
873 // While we do a full div here, in general R should be less than 2x m (assuming the RSA
874 // modulus used its full bit range and is 1024, 2048, or 4096 bits), so it should be cheap.
875 // In cases with a nonstandard RSA modulus we may end up being pretty slow here, but we'll
877 // If we ever find a problem with this we should reduce R to be tighter on m, as we're
878 // wasting extra bits of calculation if R is too far from m.
879 let (_, mut r_mod_m) = debug_unwrap!(div_rem_64(&r_minus_one, &m.0));
880 let r_mod_m_overflow = add_u64!(r_mod_m, 1);
881 if r_mod_m_overflow || r_mod_m >= m.0 {
882 (r_mod_m, _) = sub_64(&r_mod_m, &m.0);
885 let mut r2_mod_m: [u64; 64] = r_mod_m;
886 const DOUBLES: usize = 32;
887 const LOG2_DOUBLES: usize = 5;
// 2^DOUBLES * R mod m via repeated doubling with conditional reduction...
889 for _ in 0..DOUBLES {
890 let overflow = double!(r2_mod_m);
891 if overflow || r2_mod_m > m.0 {
892 (r2_mod_m, _) = sub_64(&r2_mod_m, &m.0);
// ...then repeated Montgomery squaring to reach R^2 mod m.
895 for _ in 0..log_bits - LOG2_DOUBLES {
896 r2_mod_m = mont_reduction(sqr(&r2_mod_m));
898 // Clear excess high bits
899 for (m_limb, r2_limb) in m.0.iter().zip(r2_mod_m.iter_mut()) {
900 let clear_bits = m_limb.leading_zeros();
901 if clear_bits == 0 { break; }
902 *r2_limb &= !(0xffff_ffff_ffff_ffffu64 << (64 - clear_bits));
903 if *m_limb != 0 { break; }
905 debug_assert!(r2_mod_m < m.0);
907 // Finally, actually do the exponentiation...
909 // Calculate t * R and a * R as mont multiplications by R^2 mod m
910 let mut tr = mont_reduction(mul(&r2_mod_m, &t));
911 let mut ar = mont_reduction(mul(&r2_mod_m, &self.0));
913 #[cfg(debug_assertions)] {
914 debug_assert_eq!(r2_mod_m, U4096(r_mod_m).mulmod_naive(&U4096(r_mod_m), &m).unwrap().0);
915 debug_assert_eq!(&tr, &U4096(t).mulmod_naive(&U4096(r_mod_m), &m).unwrap().0);
916 debug_assert_eq!(&ar, &self.mulmod_naive(&U4096(r_mod_m), &m).unwrap().0);
// NOTE(review): the square-and-multiply loop structure around the next three lines
// (the per-exponent-bit iteration and its conditions) is not visible in this excerpt.
921 tr = mont_reduction(mul(&tr, &ar));
924 ar = mont_reduction(sqr(&ar));
927 ar = mont_reduction(mul(&ar, &tr));
// Convert out of Montgomery form: one final reduction of ar (shifted into the high
// half) divides the R factor back out.
928 let mut resr = [0; WORD_COUNT_4096 * 2];
929 resr[WORD_COUNT_4096..].copy_from_slice(&ar);
930 Ok(U4096(mont_reduction(resr)))
934 // In a const context we can't subslice a slice, so instead we pick the eight bytes we want and
935 // pass them here to build u64s from arrays.
// Builds one big-endian u64 from eight individual bytes (`a` is the most significant).
936 const fn eight_bytes_to_u64_be(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> u64 {
937 let b = [a, b, c, d, e, f, g, h];
938 u64::from_be_bytes(b)
942 /// Constructs a new [`U256`] from a variable number of big-endian bytes.
943 pub(super) fn from_be_bytes(bytes: &[u8]) -> Result<U256, ()> {
// Reject anything wider than 256 bits.
944 if bytes.len() > 256/8 { return Err(()); }
// Number of 64-bit limbs needed to hold `bytes`, rounded up.
945 let u64s = (bytes.len() + 7) / 8;
946 let mut res = [0; WORD_COUNT_256];
// Per-limb copy: `pos` counts bytes from the end of `bytes` to this limb's start.
949 let pos = (u64s - i) * 8;
// The first (most significant) limb may have fewer than 8 source bytes,
// hence the saturating_sub and the right-alignment into the buffer `b` below.
950 let start = bytes.len().saturating_sub(pos);
951 let end = bytes.len() + 8 - pos;
952 b[8 + start - end..].copy_from_slice(&bytes[start..end]);
// Store limbs right-aligned in `res` (big-endian limb order, index 0 most significant).
953 res[i + WORD_COUNT_256 - u64s] = u64::from_be_bytes(b);
958 /// Constructs a new [`U256`] from a fixed number of big-endian bytes.
// Const-context construction: const code cannot subslice, so each of the four
// limbs is built by passing its eight bytes individually, most significant first.
961 eight_bytes_to_u64_be(bytes[0*8 + 0], bytes[0*8 + 1], bytes[0*8 + 2], bytes[0*8 + 3],
962 bytes[0*8 + 4], bytes[0*8 + 5], bytes[0*8 + 6], bytes[0*8 + 7]),
963 eight_bytes_to_u64_be(bytes[1*8 + 0], bytes[1*8 + 1], bytes[1*8 + 2], bytes[1*8 + 3],
964 bytes[1*8 + 4], bytes[1*8 + 5], bytes[1*8 + 6], bytes[1*8 + 7]),
965 eight_bytes_to_u64_be(bytes[2*8 + 0], bytes[2*8 + 1], bytes[2*8 + 2], bytes[2*8 + 3],
966 bytes[2*8 + 4], bytes[2*8 + 5], bytes[2*8 + 6], bytes[2*8 + 7]),
967 eight_bytes_to_u64_be(bytes[3*8 + 0], bytes[3*8 + 1], bytes[3*8 + 2], bytes[3*8 + 3],
968 bytes[3*8 + 4], bytes[3*8 + 5], bytes[3*8 + 6], bytes[3*8 + 7]),
// Small constants, in big-endian limb order (index 0 is most significant).
973 pub(super) const fn zero() -> U256 { U256([0, 0, 0, 0]) }
974 pub(super) const fn one() -> U256 { U256([0, 0, 0, 1]) }
975 pub(super) const fn three() -> U256 { U256([0, 0, 0, 3]) }
978 // Values modulo M::PRIME.0, stored in Montgomery form.
979 impl<M: PrimeModulus<U256>> U256Mod<M> {
// Montgomery reduction: given mu (512 bits as 8 big-endian u64 limbs), computes
// (mu / R) mod PRIME where R = 2^256, leaving the result in Montgomery form.
980 const fn mont_reduction(mu: [u64; 8]) -> Self {
981 #[cfg(debug_assertions)] {
982 // Check NEGATIVE_PRIME_INV_MOD_R is correct. Since this is all const, the compiler
983 // should be able to do it at compile time alone.
984 let minus_one_mod_r = mul_4(&M::PRIME.0, &M::NEGATIVE_PRIME_INV_MOD_R.0);
985 assert!(slice_equal(const_subslice(&minus_one_mod_r, 4, 8), &[0xffff_ffff_ffff_ffff; 4]));
988 #[cfg(debug_assertions)] {
989 // Check R_SQUARED_MOD_PRIME is correct. Since this is all const, the compiler
990 // should be able to do it at compile time alone.
991 let r_minus_one = [0xffff_ffff_ffff_ffff; 4];
992 let (mut r_mod_prime, _) = sub_4(&r_minus_one, &M::PRIME.0);
993 let r_mod_prime_overflow = add_u64!(r_mod_prime, 1);
994 assert!(!r_mod_prime_overflow);
995 let r_squared = sqr_4(&r_mod_prime);
996 let mut prime_extended = [0; 8];
997 let prime = M::PRIME.0;
998 copy_from_slice!(prime_extended, 4, 8, prime);
999 let (_, r_squared_mod_prime) = if let Ok(v) = div_rem_8(&r_squared, &prime_extended) { v } else { panic!() };
1000 assert!(slice_greater_than(&prime_extended, &r_squared_mod_prime));
1001 assert!(slice_equal(const_subslice(&r_squared_mod_prime, 4, 8), &M::R_SQUARED_MOD_PRIME.0));
1004 // mu % R is just the bottom four 64-bit limbs of mu (limbs are big-endian)
1005 let mu_mod_r = const_subslice(&mu, 4, 8);
1006 // v = ((mu % R) * negative_modulus_inverse) % R
1007 let mut v = mul_4(&mu_mod_r, &M::NEGATIVE_PRIME_INV_MOD_R.0);
1008 const ZEROS: &[u64; 4] = &[0; 4];
1009 copy_from_slice!(v, 0, 4, ZEROS); // mod R
1011 // t_on_r = (mu + v*modulus) / R
1012 let t0 = mul_4(const_subslice(&v, 4, 8), &M::PRIME.0);
1013 let (t1, t1_extra_bit) = add_8(&t0, &mu);
1015 // Note that dividing t1 by R is simply a matter of shifting right by four limbs.
1016 // We only need to maintain 4 limbs (plus `t1_extra_bit` which is implicitly an extra bit)
1017 // because t_on_r is guaranteed to be, at max, 2*m - 1.
1018 let t1_on_r = const_subslice(&t1, 0, 4);
1020 let mut res = [0; 4];
1021 // The modulus is only 4 limbs, so t1_extra_bit implies we're definitely larger than the
1023 if t1_extra_bit || slice_greater_than(&t1_on_r, &M::PRIME.0) {
1025 (res, underflow) = sub_4(&t1_on_r, &M::PRIME.0);
1026 debug_assert!(t1_extra_bit == underflow,
1027 "The number (t1_extra_bit, t1_on_r) is at most 2m-1, so underflowing t1_on_r - m should happen iff t1_extra_bit is set.");
1029 copy_from_slice!(res, 0, 4, t1_on_r);
1031 Self(U256(res), PhantomData)
// Const constructor from a plain (non-Montgomery) U256; asserts (panicking in
// const context) unless v < PRIME. The nested compares walk the big-endian
// limbs most-significant-first.
1034 pub(super) const fn from_u256_panicking(v: U256) -> Self {
1035 assert!(v.0[0] <= M::PRIME.0[0]);
1036 if v.0[0] == M::PRIME.0[0] {
1037 assert!(v.0[1] <= M::PRIME.0[1]);
1038 if v.0[1] == M::PRIME.0[1] {
1039 assert!(v.0[2] <= M::PRIME.0[2]);
1040 if v.0[2] == M::PRIME.0[2] {
1041 assert!(v.0[3] < M::PRIME.0[3]);
1045 assert!(M::PRIME.0[0] != 0 || M::PRIME.0[1] != 0 || M::PRIME.0[2] != 0 || M::PRIME.0[3] != 0);
// Enter Montgomery form: reduce v * R^2, i.e. (v * R^2) / R = v*R mod PRIME.
1046 Self::mont_reduction(mul_4(&M::R_SQUARED_MOD_PRIME.0, &v.0))
// Runtime constructor from a plain U256, reducing v mod PRIME first.
1049 pub(super) fn from_u256(mut v: U256) -> Self {
1050 debug_assert!(M::PRIME.0 != [0; 4]);
1051 debug_assert!(M::PRIME.0[0] > (1 << 63), "PRIME should have the top bit set");
// With PRIME's top bit set this loop runs at most once.
1052 while v >= M::PRIME {
1053 let (new_v, spurious_underflow) = sub_4(&v.0, &M::PRIME.0);
1054 debug_assert!(!spurious_underflow, "v was > M::PRIME.0");
// Enter Montgomery form: reduce v * R^2, i.e. (v * R^2) / R = v*R mod PRIME.
1057 Self::mont_reduction(mul_4(&M::R_SQUARED_MOD_PRIME.0, &v.0))
// Builds the Montgomery-form modular inverse of `v` mod PRIME, or Err(()) if
// no inverse exists (v shares a factor with PRIME or is zero).
1060 pub(super) fn from_modinv_of(v: U256) -> Result<Self, ()> {
1061 Ok(Self::from_u256(U256(mod_inv_4(&v.0, &M::PRIME.0)?)))
1064 /// Multiplies `self` * `b` mod `m`.
1066 /// Panics if `self`'s modulus is not equal to `b`'s
1067 pub(super) fn mul(&self, b: &Self) -> Self {
// Montgomery product: (aR * bR) / R = (a*b)R mod PRIME.
1068 Self::mont_reduction(mul_4(&self.0.0, &b.0.0))
1071 /// Doubles `self` mod `m`.
1072 pub(super) fn double(&self) -> Self {
1073 let mut res = self.0.0;
1074 let overflow = double!(res);
// If the shift carried out or res >= PRIME, one subtraction of PRIME re-reduces.
1075 if overflow || !slice_greater_than(&M::PRIME.0, &res) {
1077 (res, underflow) = sub_4(&res, &M::PRIME.0);
// The subtraction's borrow must exactly cancel the doubling's carry.
1078 debug_assert_eq!(overflow, underflow);
1080 Self(U256(res), PhantomData)
1083 /// Multiplies `self` by 3 mod `m`.
1084 pub(super) fn times_three(&self) -> Self {
1085 // TODO: Optimize this a lot
// Currently a full Montgomery multiply by the constant 3, converted per call.
1086 self.mul(&U256Mod::from_u256(U256::three()))
1089 /// Multiplies `self` by 4 mod `m`.
1090 pub(super) fn times_four(&self) -> Self {
1091 // TODO: Optimize this somewhat?
// Two modular doublings.
1092 self.double().double()
1095 /// Multiplies `self` by 8 mod `m`.
1096 pub(super) fn times_eight(&self) -> Self {
1097 // TODO: Optimize this somewhat?
// Three modular doublings.
1098 self.double().double().double()
// Doc fix: the previous header said "Multiplies `self` by 8", copied from
// `times_eight`; the body squares.
1101 /// Squares `self` mod `m`.
1102 pub(super) fn square(&self) -> Self {
1103 Self::mont_reduction(sqr_4(&self.0.0))
1106 /// Subtracts `b` from `self` % `m`.
1107 pub(super) fn sub(&self, b: &Self) -> Self {
1108 let (mut val, underflow) = sub_4(&self.0.0, &b.0.0);
// On borrow, wrap back into [0, PRIME) by adding PRIME; the add's carry must
// exactly cancel the subtraction's borrow.
1111 (val, overflow) = add_4(&val, &M::PRIME.0);
1112 debug_assert_eq!(overflow, underflow);
1114 Self(U256(val), PhantomData)
1117 /// Adds `b` to `self` % `m`.
1118 pub(super) fn add(&self, b: &Self) -> Self {
1119 let (mut val, overflow) = add_4(&self.0.0, &b.0.0);
// If the add carried out or val >= PRIME, one subtraction of PRIME re-reduces.
1120 if overflow || !slice_greater_than(&M::PRIME.0, &val) {
1122 (val, underflow) = sub_4(&val, &M::PRIME.0);
1123 debug_assert_eq!(overflow, underflow);
1125 Self(U256(val), PhantomData)
1128 /// Returns the underlying [`U256`].
1129 pub(super) fn into_u256(self) -> U256 {
// Leave Montgomery form: zero-extend to double width and reduce once, which
// divides by R and yields the plain value.
1130 let mut expanded_self = [0; 8];
1131 expanded_self[4..].copy_from_slice(&self.0.0);
1132 Self::mont_reduction(expanded_self).0
1136 // Values modulo M::PRIME.0, stored in Montgomery form.
1138 /// Constructs a new [`U384`] from a variable number of big-endian bytes.
1139 pub(super) fn from_be_bytes(bytes: &[u8]) -> Result<U384, ()> {
// Reject anything wider than 384 bits.
1140 if bytes.len() > 384/8 { return Err(()); }
// Number of 64-bit limbs needed to hold `bytes`, rounded up.
1141 let u64s = (bytes.len() + 7) / 8;
1142 let mut res = [0; WORD_COUNT_384];
// Per-limb copy: `pos` counts bytes from the end of `bytes` to this limb's start.
1145 let pos = (u64s - i) * 8;
// The first (most significant) limb may have fewer than 8 source bytes,
// hence the saturating_sub and the right-alignment into the buffer `b` below.
1146 let start = bytes.len().saturating_sub(pos);
1147 let end = bytes.len() + 8 - pos;
1148 b[8 + start - end..].copy_from_slice(&bytes[start..end]);
// Store limbs right-aligned in `res` (big-endian limb order, index 0 most significant).
1149 res[i + WORD_COUNT_384 - u64s] = u64::from_be_bytes(b);
1154 /// Constructs a new [`U384`] from a fixed number of big-endian bytes.
// Const-context construction: const code cannot subslice, so each of the six
// limbs is built by passing its eight bytes individually, most significant first.
1155 pub(super) const fn from_48_be_bytes_panicking(bytes: &[u8; 48]) -> U384 {
1157 eight_bytes_to_u64_be(bytes[0*8 + 0], bytes[0*8 + 1], bytes[0*8 + 2], bytes[0*8 + 3],
1158 bytes[0*8 + 4], bytes[0*8 + 5], bytes[0*8 + 6], bytes[0*8 + 7]),
1159 eight_bytes_to_u64_be(bytes[1*8 + 0], bytes[1*8 + 1], bytes[1*8 + 2], bytes[1*8 + 3],
1160 bytes[1*8 + 4], bytes[1*8 + 5], bytes[1*8 + 6], bytes[1*8 + 7]),
1161 eight_bytes_to_u64_be(bytes[2*8 + 0], bytes[2*8 + 1], bytes[2*8 + 2], bytes[2*8 + 3],
1162 bytes[2*8 + 4], bytes[2*8 + 5], bytes[2*8 + 6], bytes[2*8 + 7]),
1163 eight_bytes_to_u64_be(bytes[3*8 + 0], bytes[3*8 + 1], bytes[3*8 + 2], bytes[3*8 + 3],
1164 bytes[3*8 + 4], bytes[3*8 + 5], bytes[3*8 + 6], bytes[3*8 + 7]),
1165 eight_bytes_to_u64_be(bytes[4*8 + 0], bytes[4*8 + 1], bytes[4*8 + 2], bytes[4*8 + 3],
1166 bytes[4*8 + 4], bytes[4*8 + 5], bytes[4*8 + 6], bytes[4*8 + 7]),
1167 eight_bytes_to_u64_be(bytes[5*8 + 0], bytes[5*8 + 1], bytes[5*8 + 2], bytes[5*8 + 3],
1168 bytes[5*8 + 4], bytes[5*8 + 5], bytes[5*8 + 6], bytes[5*8 + 7]),
// Small constants, in big-endian limb order (index 0 is most significant).
1173 pub(super) const fn zero() -> U384 { U384([0, 0, 0, 0, 0, 0]) }
1174 pub(super) const fn one() -> U384 { U384([0, 0, 0, 0, 0, 1]) }
1175 pub(super) const fn three() -> U384 { U384([0, 0, 0, 0, 0, 3]) }
1178 impl<M: PrimeModulus<U384>> U384Mod<M> {
// Montgomery reduction: given mu (768 bits as 12 big-endian u64 limbs), computes
// (mu / R) mod PRIME where R = 2^384, leaving the result in Montgomery form.
1179 const fn mont_reduction(mu: [u64; 12]) -> Self {
1180 #[cfg(debug_assertions)] {
1181 // Check NEGATIVE_PRIME_INV_MOD_R is correct. Since this is all const, the compiler
1182 // should be able to do it at compile time alone.
1183 let minus_one_mod_r = mul_6(&M::PRIME.0, &M::NEGATIVE_PRIME_INV_MOD_R.0);
1184 assert!(slice_equal(const_subslice(&minus_one_mod_r, 6, 12), &[0xffff_ffff_ffff_ffff; 6]));
1187 #[cfg(debug_assertions)] {
1188 // Check R_SQUARED_MOD_PRIME is correct. Since this is all const, the compiler
1189 // should be able to do it at compile time alone.
1190 let r_minus_one = [0xffff_ffff_ffff_ffff; 6];
1191 let (mut r_mod_prime, _) = sub_6(&r_minus_one, &M::PRIME.0);
1192 let r_mod_prime_overflow = add_u64!(r_mod_prime, 1);
1193 assert!(!r_mod_prime_overflow);
1194 let r_squared = sqr_6(&r_mod_prime);
1195 let mut prime_extended = [0; 12];
1196 let prime = M::PRIME.0;
1197 copy_from_slice!(prime_extended, 6, 12, prime);
1198 let (_, r_squared_mod_prime) = if let Ok(v) = div_rem_12(&r_squared, &prime_extended) { v } else { panic!() };
1199 assert!(slice_greater_than(&prime_extended, &r_squared_mod_prime));
1200 assert!(slice_equal(const_subslice(&r_squared_mod_prime, 6, 12), &M::R_SQUARED_MOD_PRIME.0));
1203 // mu % R is just the bottom six 64-bit limbs of mu (limbs are big-endian)
1204 let mu_mod_r = const_subslice(&mu, 6, 12);
1205 // v = ((mu % R) * negative_modulus_inverse) % R
1206 let mut v = mul_6(&mu_mod_r, &M::NEGATIVE_PRIME_INV_MOD_R.0);
1207 const ZEROS: &[u64; 6] = &[0; 6];
1208 copy_from_slice!(v, 0, 6, ZEROS); // mod R
1210 // t_on_r = (mu + v*modulus) / R
1211 let t0 = mul_6(const_subslice(&v, 6, 12), &M::PRIME.0);
1212 let (t1, t1_extra_bit) = add_12(&t0, &mu);
1214 // Note that dividing t1 by R is simply a matter of shifting right by six limbs.
1215 // We only need to maintain 6 limbs (plus `t1_extra_bit` which is implicitly an extra bit)
1216 // because t_on_r is guaranteed to be, at max, 2*m - 1.
1217 let t1_on_r = const_subslice(&t1, 0, 6);
1219 let mut res = [0; 6];
1220 // The modulus is only 6 limbs, so t1_extra_bit implies we're definitely larger than the
1222 if t1_extra_bit || slice_greater_than(&t1_on_r, &M::PRIME.0) {
1224 (res, underflow) = sub_6(&t1_on_r, &M::PRIME.0);
// t_on_r is at most 2m-1, so the borrow must occur iff t1_extra_bit was set.
1225 debug_assert!(t1_extra_bit == underflow);
1227 copy_from_slice!(res, 0, 6, t1_on_r);
1229 Self(U384(res), PhantomData)
// Const constructor from a plain (non-Montgomery) U384; asserts (panicking in
// const context) unless v < PRIME. The nested compares walk the big-endian
// limbs most-significant-first.
1232 pub(super) const fn from_u384_panicking(v: U384) -> Self {
1233 assert!(v.0[0] <= M::PRIME.0[0]);
1234 if v.0[0] == M::PRIME.0[0] {
1235 assert!(v.0[1] <= M::PRIME.0[1]);
1236 if v.0[1] == M::PRIME.0[1] {
1237 assert!(v.0[2] <= M::PRIME.0[2]);
1238 if v.0[2] == M::PRIME.0[2] {
1239 assert!(v.0[3] <= M::PRIME.0[3]);
1240 if v.0[3] == M::PRIME.0[3] {
1241 assert!(v.0[4] <= M::PRIME.0[4]);
1242 if v.0[4] == M::PRIME.0[4] {
1243 assert!(v.0[5] < M::PRIME.0[5]);
1249 assert!(M::PRIME.0[0] != 0 || M::PRIME.0[1] != 0 || M::PRIME.0[2] != 0
1250 || M::PRIME.0[3] != 0|| M::PRIME.0[4] != 0|| M::PRIME.0[5] != 0);
// Enter Montgomery form: reduce v * R^2, i.e. (v * R^2) / R = v*R mod PRIME.
1251 Self::mont_reduction(mul_6(&M::R_SQUARED_MOD_PRIME.0, &v.0))
// Runtime constructor from a plain U384, reducing v mod PRIME first.
1254 pub(super) fn from_u384(mut v: U384) -> Self {
1255 debug_assert!(M::PRIME.0 != [0; 6]);
1256 debug_assert!(M::PRIME.0[0] > (1 << 63), "PRIME should have the top bit set");
// With PRIME's top bit set this loop runs at most once.
1257 while v >= M::PRIME {
1258 let (new_v, spurious_underflow) = sub_6(&v.0, &M::PRIME.0);
1259 debug_assert!(!spurious_underflow);
// Enter Montgomery form: reduce v * R^2, i.e. (v * R^2) / R = v*R mod PRIME.
1262 Self::mont_reduction(mul_6(&M::R_SQUARED_MOD_PRIME.0, &v.0))
// Builds the Montgomery-form modular inverse of `v` mod PRIME, or Err(()) if
// no inverse exists (v shares a factor with PRIME or is zero).
1265 pub(super) fn from_modinv_of(v: U384) -> Result<Self, ()> {
1266 Ok(Self::from_u384(U384(mod_inv_6(&v.0, &M::PRIME.0)?)))
1269 /// Multiplies `self` * `b` mod `m`.
1271 /// Panics if `self`'s modulus is not equal to `b`'s
1272 pub(super) fn mul(&self, b: &Self) -> Self {
// Montgomery product: (aR * bR) / R = (a*b)R mod PRIME.
1273 Self::mont_reduction(mul_6(&self.0.0, &b.0.0))
1276 /// Doubles `self` mod `m`.
1277 pub(super) fn double(&self) -> Self {
1278 let mut res = self.0.0;
1279 let overflow = double!(res);
// If the shift carried out or res >= PRIME, one subtraction of PRIME re-reduces.
1280 if overflow || !slice_greater_than(&M::PRIME.0, &res) {
1282 (res, underflow) = sub_6(&res, &M::PRIME.0);
// The subtraction's borrow must exactly cancel the doubling's carry.
1283 debug_assert_eq!(overflow, underflow);
1285 Self(U384(res), PhantomData)
1288 /// Multiplies `self` by 3 mod `m`.
1289 pub(super) fn times_three(&self) -> Self {
1290 // TODO: Optimize this a lot
// Currently a full Montgomery multiply by the constant 3, converted per call.
1291 self.mul(&U384Mod::from_u384(U384::three()))
1294 /// Multiplies `self` by 4 mod `m`.
1295 pub(super) fn times_four(&self) -> Self {
1296 // TODO: Optimize this somewhat?
// Two modular doublings.
1297 self.double().double()
1300 /// Multiplies `self` by 8 mod `m`.
1301 pub(super) fn times_eight(&self) -> Self {
1302 // TODO: Optimize this somewhat?
// Three modular doublings.
1303 self.double().double().double()
// Doc fix: the previous header said "Multiplies `self` by 8", copied from
// `times_eight`; the body squares.
1306 /// Squares `self` mod `m`.
1307 pub(super) fn square(&self) -> Self {
1308 Self::mont_reduction(sqr_6(&self.0.0))
1311 /// Subtracts `b` from `self` % `m`.
1312 pub(super) fn sub(&self, b: &Self) -> Self {
1313 let (mut val, underflow) = sub_6(&self.0.0, &b.0.0);
// On borrow, wrap back into [0, PRIME) by adding PRIME; the add's carry must
// exactly cancel the subtraction's borrow.
1316 (val, overflow) = add_6(&val, &M::PRIME.0);
1317 debug_assert_eq!(overflow, underflow);
1319 Self(U384(val), PhantomData)
1322 /// Adds `b` to `self` % `m`.
1323 pub(super) fn add(&self, b: &Self) -> Self {
1324 let (mut val, overflow) = add_6(&self.0.0, &b.0.0);
// If the add carried out or val >= PRIME, one subtraction of PRIME re-reduces.
1325 if overflow || !slice_greater_than(&M::PRIME.0, &val) {
1327 (val, underflow) = sub_6(&val, &M::PRIME.0);
1328 debug_assert_eq!(overflow, underflow);
1330 Self(U384(val), PhantomData)
1333 /// Returns the underlying [`U384`].
1334 pub(super) fn into_u384(self) -> U384 {
// Leave Montgomery form: zero-extend to double width and reduce once, which
// divides by R and yields the plain value.
1335 let mut expanded_self = [0; 12];
1336 expanded_self[6..].copy_from_slice(&self.0.0);
1337 Self::mont_reduction(expanded_self).0
// NIST P-256 (secp256r1) field modulus plus precomputed Montgomery constants
// for R = 2^256: R^2 mod p and -p^-1 mod R. Values are validated by the
// debug assertions in `mont_reduction`.
1346 impl PrimeModulus<U256> for P256 {
1347 const PRIME: U256 = U256::from_32_be_bytes_panicking(&hex_lit::hex!(
1348 "ffffffff00000001000000000000000000000000ffffffffffffffffffffffff"));
1349 const R_SQUARED_MOD_PRIME: U256 = U256::from_32_be_bytes_panicking(&hex_lit::hex!(
1350 "00000004fffffffdfffffffffffffffefffffffbffffffff0000000000000003"));
1351 const NEGATIVE_PRIME_INV_MOD_R: U256 = U256::from_32_be_bytes_panicking(&hex_lit::hex!(
1352 "ffffffff00000002000000000000000000000001000000000000000000000001"));
// NIST P-384 (secp384r1) field modulus plus precomputed Montgomery constants
// for R = 2^384: R^2 mod p and -p^-1 mod R. Values are validated by the
// debug assertions in `mont_reduction`.
1356 impl PrimeModulus<U384> for P384 {
1357 const PRIME: U384 = U384::from_48_be_bytes_panicking(&hex_lit::hex!(
1358 "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff"));
1359 const R_SQUARED_MOD_PRIME: U384 = U384::from_48_be_bytes_panicking(&hex_lit::hex!(
1360 "000000000000000000000000000000010000000200000000fffffffe000000000000000200000000fffffffe00000001"));
1361 const NEGATIVE_PRIME_INV_MOD_R: U384 = U384::from_48_be_bytes_panicking(&hex_lit::hex!(
1362 "00000014000000140000000c00000002fffffffcfffffffafffffffbfffffffe00000000000000010000000100000001"));
1369 /// Read some bytes and use them to test bigint math by comparing results against the `ibig` crate.
1370 pub fn fuzz_math(input: &[u8]) {
1371 if input.len() < 32 || input.len() % 16 != 0 { return; }
1372 let split = core::cmp::min(input.len() / 2, 512);
1373 let (a, b) = input.split_at(core::cmp::min(input.len() / 2, 512));
1374 let b = &b[..split];
1376 let ai = ibig::UBig::from_be_bytes(&a);
1377 let bi = ibig::UBig::from_be_bytes(&b);
1379 let mut a_u64s = Vec::with_capacity(split / 8);
1380 for chunk in a.chunks(8) {
1381 a_u64s.push(u64::from_be_bytes(chunk.try_into().unwrap()));
1383 let mut b_u64s = Vec::with_capacity(split / 8);
1384 for chunk in b.chunks(8) {
1385 b_u64s.push(u64::from_be_bytes(chunk.try_into().unwrap()));
1388 macro_rules! test { ($mul: ident, $sqr: ident, $add: ident, $sub: ident, $div_rem: ident, $mod_inv: ident) => {
1389 let res = $mul(&a_u64s, &b_u64s);
1390 let mut res_bytes = Vec::with_capacity(input.len() / 2);
1392 res_bytes.extend_from_slice(&i.to_be_bytes());
1394 assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), ai.clone() * bi.clone());
1396 debug_assert_eq!($mul(&a_u64s, &a_u64s), $sqr(&a_u64s));
1397 debug_assert_eq!($mul(&b_u64s, &b_u64s), $sqr(&b_u64s));
1399 let (res, carry) = $add(&a_u64s, &b_u64s);
1400 let mut res_bytes = Vec::with_capacity(input.len() / 2 + 1);
1401 if carry { res_bytes.push(1); } else { res_bytes.push(0); }
1403 res_bytes.extend_from_slice(&i.to_be_bytes());
1405 assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), ai.clone() + bi.clone());
1407 let mut add_u64s = a_u64s.clone();
1408 let carry = add_u64!(add_u64s, 1);
1409 let mut res_bytes = Vec::with_capacity(input.len() / 2 + 1);
1410 if carry { res_bytes.push(1); } else { res_bytes.push(0); }
1411 for i in &add_u64s {
1412 res_bytes.extend_from_slice(&i.to_be_bytes());
1414 assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), ai.clone() + 1);
1416 let mut double_u64s = b_u64s.clone();
1417 let carry = double!(double_u64s);
1418 let mut res_bytes = Vec::with_capacity(input.len() / 2 + 1);
1419 if carry { res_bytes.push(1); } else { res_bytes.push(0); }
1420 for i in &double_u64s {
1421 res_bytes.extend_from_slice(&i.to_be_bytes());
1423 assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), bi.clone() * 2);
1425 let (quot, rem) = if let Ok(res) =
1426 $div_rem(&a_u64s[..].try_into().unwrap(), &b_u64s[..].try_into().unwrap()) {
1429 let mut quot_bytes = Vec::with_capacity(input.len() / 2);
1431 quot_bytes.extend_from_slice(&i.to_be_bytes());
1433 let mut rem_bytes = Vec::with_capacity(input.len() / 2);
1435 rem_bytes.extend_from_slice(&i.to_be_bytes());
1437 let (quoti, remi) = ibig::ops::DivRem::div_rem(ai.clone(), &bi);
1438 assert_eq!(ibig::UBig::from_be_bytes("_bytes), quoti);
1439 assert_eq!(ibig::UBig::from_be_bytes(&rem_bytes), remi);
1441 if ai != ibig::UBig::from(0u32) { // ibig provides a spurious modular inverse for 0
1442 let ring = ibig::modular::ModuloRing::new(&bi);
1443 let ar = ring.from(ai.clone());
1444 let invi = ar.inverse().map(|i| i.residue());
1446 if let Ok(modinv) = $mod_inv(&a_u64s[..].try_into().unwrap(), &b_u64s[..].try_into().unwrap()) {
1447 let mut modinv_bytes = Vec::with_capacity(input.len() / 2);
1449 modinv_bytes.extend_from_slice(&i.to_be_bytes());
1451 assert_eq!(invi.unwrap(), ibig::UBig::from_be_bytes(&modinv_bytes));
1453 assert!(invi.is_none());
// Cross-checks the Montgomery-form U256Mod/U384Mod wrappers ($amodp/$bmodp)
// against plain big-int arithmetic reduced mod $PRIME.
1458 macro_rules! test_mod { ($amodp: expr, $bmodp: expr, $PRIME: expr, $len: expr, $into: ident, $div_rem_double: ident, $div_rem: ident, $mul: ident, $add: ident, $sub: ident) => {
1459 // Test the U256/U384Mod wrapper, which operates in Montgomery representation
// Zero-extend the prime to double width so double-width products can be reduced by it.
1460 let mut p_extended = [0; $len * 2];
1461 p_extended[$len..].copy_from_slice(&$PRIME);
// a^2 mod p
1463 let amodp_squared = $div_rem_double(&$mul(&a_u64s, &a_u64s), &p_extended).unwrap().1;
1464 assert_eq!(&amodp_squared[..$len], &[0; $len]);
1465 assert_eq!(&$amodp.square().$into().0, &amodp_squared[$len..]);
// a*b mod p
1467 let abmodp = $div_rem_double(&$mul(&a_u64s, &b_u64s), &p_extended).unwrap().1;
1468 assert_eq!(&abmodp[..$len], &[0; $len]);
1469 assert_eq!(&$amodp.mul(&$bmodp).$into().0, &abmodp[$len..]);
// (a+b) mod p, with the add's carry placed just above the copied limbs
1471 let (aplusb, aplusb_overflow) = $add(&a_u64s, &b_u64s);
1472 let mut aplusb_extended = [0; $len * 2];
1473 aplusb_extended[$len..].copy_from_slice(&aplusb);
1474 if aplusb_overflow { aplusb_extended[$len - 1] = 1; }
1475 let aplusbmodp = $div_rem_double(&aplusb_extended, &p_extended).unwrap().1;
1476 assert_eq!(&aplusbmodp[..$len], &[0; $len]);
1477 assert_eq!(&$amodp.add(&$bmodp).$into().0, &aplusbmodp[$len..]);
// (a-b) mod p: on underflow, add the prime back before reducing
1479 let (mut aminusb, aminusb_underflow) = $sub(&a_u64s, &b_u64s);
1480 if aminusb_underflow {
1482 (aminusb, overflow) = $add(&aminusb, &$PRIME);
1484 (aminusb, overflow) = $add(&aminusb, &$PRIME);
1488 let aminusbmodp = $div_rem(&aminusb, &$PRIME).unwrap().1;
1489 assert_eq!(&$amodp.sub(&$bmodp).$into().0, &aminusbmodp);
1492 if a_u64s.len() == 2 {
1493 test!(mul_2, sqr_2, add_2, sub_2, div_rem_2, mod_inv_2);
1494 } else if a_u64s.len() == 4 {
1495 test!(mul_4, sqr_4, add_4, sub_4, div_rem_4, mod_inv_4);
1496 let amodp = U256Mod::<fuzz_moduli::P256>::from_u256(U256(a_u64s[..].try_into().unwrap()));
1497 let bmodp = U256Mod::<fuzz_moduli::P256>::from_u256(U256(b_u64s[..].try_into().unwrap()));
1498 test_mod!(amodp, bmodp, fuzz_moduli::P256::PRIME.0, 4, into_u256, div_rem_8, div_rem_4, mul_4, add_4, sub_4);
1499 } else if a_u64s.len() == 6 {
1500 test!(mul_6, sqr_6, add_6, sub_6, div_rem_6, mod_inv_6);
1501 let amodp = U384Mod::<fuzz_moduli::P384>::from_u384(U384(a_u64s[..].try_into().unwrap()));
1502 let bmodp = U384Mod::<fuzz_moduli::P384>::from_u384(U384(b_u64s[..].try_into().unwrap()));
1503 test_mod!(amodp, bmodp, fuzz_moduli::P384::PRIME.0, 6, into_u384, div_rem_12, div_rem_6, mul_6, add_6, sub_6);
1504 } else if a_u64s.len() == 8 {
1505 test!(mul_8, sqr_8, add_8, sub_8, div_rem_8, mod_inv_8);
1506 } else if input.len() == 512*2 + 4 {
1507 let mut e_bytes = [0; 4];
1508 e_bytes.copy_from_slice(&input[512 * 2..512 * 2 + 4]);
1509 let e = u32::from_le_bytes(e_bytes);
1510 let a = U4096::from_be_bytes(&a).unwrap();
1511 let b = U4096::from_be_bytes(&b).unwrap();
1513 let res = if let Ok(r) = a.expmod_odd_mod(e, &b) { r } else { return };
1514 let mut res_bytes = Vec::with_capacity(512);
1516 res_bytes.extend_from_slice(&i.to_be_bytes());
1519 let ring = ibig::modular::ModuloRing::new(&bi);
1520 let ar = ring.from(ai.clone());
1521 assert_eq!(ar.pow(&e.into()).residue(), ibig::UBig::from_be_bytes(&res_bytes));
// Reassembles two big-endian u64 limbs into a u128 for test comparisons
// (v[0] is the most significant limb).
1529 fn u64s_to_u128(v: [u64; 2]) -> u128 {
1532 r |= (v[0] as u128) << 64;
// Reassembles two big-endian u64 limbs into an i128 (two's complement) for
// signed test comparisons (v[0] is the most significant limb).
1536 fn u64s_to_i128(v: [u64; 2]) -> i128 {
1539 r |= (v[0] as i128) << 64;
1545 let mut zero = [0u64; 2];
1547 assert_eq!(zero, [0; 2]);
1549 let mut one = [0u64, 1u64];
1551 assert_eq!(u64s_to_i128(one), -1);
1553 let mut minus_one: [u64; 2] = [u64::MAX, u64::MAX];
1555 assert_eq!(minus_one, [0, 1]);
1560 let mut zero = [0u64; 2];
1561 assert!(!double!(zero));
1562 assert_eq!(zero, [0; 2]);
1564 let mut one = [0u64, 1u64];
1565 assert!(!double!(one));
1566 assert_eq!(one, [0, 2]);
1568 let mut u64_max = [0, u64::MAX];
1569 assert!(!double!(u64_max));
1570 assert_eq!(u64_max, [1, u64::MAX - 1]);
1572 let mut u64_carry_overflow = [0x7fff_ffff_ffff_ffffu64, 0x8000_0000_0000_0000];
1573 assert!(!double!(u64_carry_overflow));
1574 assert_eq!(u64_carry_overflow, [u64::MAX, 0]);
1576 let mut max = [u64::MAX; 4];
1577 assert!(double!(max));
1578 assert_eq!(max, [u64::MAX, u64::MAX, u64::MAX, u64::MAX - 1]);
// Spot-checks mul_2 (128x128 -> 256-bit multiply) against precomputed products,
// covering small values, mixed magnitudes, carry-heavy patterns, and the
// all-ones maximum.
1582 fn mul_min_simple_tests() {
1585 let res = mul_2(&a, &b);
1586 assert_eq!(res, [0, 3, 10, 8]);
1588 let a = [0x1bad_cafe_dead_beef, 2424];
1589 let b = [0x2bad_beef_dead_cafe, 4242];
1590 let res = mul_2(&a, &b);
1591 assert_eq!(res, [340296855556511776, 15015369169016130186, 4248480538569992542, 10282608]);
1593 let a = [0xf6d9_f8eb_8b60_7a6d, 0x4b93_833e_2194_fc2e];
1594 let b = [0xfdab_0000_6952_8ab4, 0xd302_0000_8282_0000];
1595 let res = mul_2(&a, &b);
1596 assert_eq!(res, [17625486516939878681, 18390748118453258282, 2695286104209847530, 1510594524414214144]);
1598 let a = [0x8b8b_8b8b_8b8b_8b8b, 0x8b8b_8b8b_8b8b_8b8b];
1599 let b = [0x8b8b_8b8b_8b8b_8b8b, 0x8b8b_8b8b_8b8b_8b8b];
1600 let res = mul_2(&a, &b);
1601 assert_eq!(res, [5481115605507762349, 8230042173354675923, 16737530186064798, 15714555036048702841]);
1603 let a = [0x0000_0000_0000_0020, 0x002d_362c_005b_7753];
1604 let b = [0x0900_0000_0030_0003, 0xb708_00fe_0000_00cd];
1605 let res = mul_2(&a, &b);
1606 assert_eq!(res, [1, 2306290405521702946, 17647397529888728169, 10271802099389861239]);
1608 let a = [0x0000_0000_7fff_ffff, 0xffff_ffff_0000_0000];
1609 let b = [0x0000_0800_0000_0000, 0x0000_1000_0000_00e1];
1610 let res = mul_2(&a, &b);
1611 assert_eq!(res, [1024, 0, 483183816703, 18446743107341910016]);
1613 let a = [0xf6d9_f8eb_ebeb_eb6d, 0x4b93_83a0_bb35_0680];
1614 let b = [0xfd02_b9b9_b9b9_b9b9, 0xb9b9_b9b9_b9b9_b9b9];
1615 let res = mul_2(&a, &b);
1616 assert_eq!(res, [17579814114991930107, 15033987447865175985, 488855932380801351, 5453318140933190272]);
// Maximum operands: (2^128 - 1)^2 = 2^256 - 2^129 + 1.
1618 let a = [u64::MAX; 2];
1619 let b = [u64::MAX; 2];
1620 let res = mul_2(&a, &b);
1621 assert_eq!(res, [18446744073709551615, 18446744073709551614, 0, 1]);
// Checks add_2/sub_2 (result, carry/borrow) against u128 overflowing
// arithmetic on the same limbs.
1626 fn test(a: [u64; 2], b: [u64; 2]) {
1627 let a_int = u64s_to_u128(a);
1628 let b_int = u64s_to_u128(b);
1630 let res = add_2(&a, &b);
1631 assert_eq!((u64s_to_u128(res.0), res.1), a_int.overflowing_add(b_int));
1633 let res = sub_2(&a, &b);
1634 assert_eq!((u64s_to_u128(res.0), res.1), a_int.overflowing_sub(b_int));
1637 test([0; 2], [0; 2]);
1638 test([0x1bad_cafe_dead_beef, 2424], [0x2bad_cafe_dead_cafe, 4242]);
1639 test([u64::MAX; 2], [u64::MAX; 2]);
1640 test([u64::MAX, 0x8000_0000_0000_0000], [0, 0x7fff_ffff_ffff_ffff]);
1641 test([0, 0x7fff_ffff_ffff_ffff], [u64::MAX, 0x8000_0000_0000_0000]);
1642 test([u64::MAX, 0], [0, u64::MAX]);
1643 test([0, u64::MAX], [u64::MAX, 0]);
1644 test([u64::MAX; 2], [0; 2]);
1645 test([0; 2], [u64::MAX; 2]);
// Spot-checks mul_4 (256x256 -> 512-bit multiply) against precomputed products,
// including the all-ones maximum: (2^256 - 1)^2 = 2^512 - 2^257 + 1.
1649 fn mul_4_simple_tests() {
1652 assert_eq!(mul_4(&a, &b),
1653 [0, 2, 4, 6, 8, 6, 4, 2]);
1655 let a = [0x1bad_cafe_dead_beef, 2424, 0x1bad_cafe_dead_beef, 2424];
1656 let b = [0x2bad_beef_dead_cafe, 4242, 0x2bad_beef_dead_cafe, 4242];
1657 assert_eq!(mul_4(&a, &b),
1658 [340296855556511776, 15015369169016130186, 4929074249683016095, 11583994264332991364,
1659 8837257932696496860, 15015369169036695402, 4248480538569992542, 10282608]);
1661 let a = [u64::MAX; 4];
1662 let b = [u64::MAX; 4];
1663 assert_eq!(mul_4(&a, &b),
1664 [18446744073709551615, 18446744073709551615, 18446744073709551615,
1665 18446744073709551614, 0, 0, 0, 1]);
1669 fn double_simple_tests() {
1670 let mut a = [0xfff5_b32d_01ff_0000, 0x00e7_e7e7_e7e7_e7e7];
1671 assert!(double!(a));
1672 assert_eq!(a, [18440945635998695424, 130551405668716494]);
1674 let mut a = [u64::MAX, u64::MAX];
1675 assert!(double!(a));
1676 assert_eq!(a, [18446744073709551615, 18446744073709551614]);