1 //! Simple variable-time big integer implementation
4 use core::marker::PhantomData;
6 #[allow(clippy::needless_lifetimes)] // lifetimes improve readability
7 #[allow(clippy::needless_borrow)] // borrows indicate read-only/non-move
8 #[allow(clippy::too_many_arguments)] // sometimes we don't have an option
9 #[allow(clippy::identity_op)] // sometimes identities improve readability for repeated actions
11 // **************************************
12 // * Implementations of math primitives *
13 // **************************************
// Const-context `Result` unwrap: debug-asserts the value is `Ok`, and propagates any
// `Err` with an early `return` — so this macro is only usable inside functions that
// themselves return a compatible `Result`.
15 macro_rules! debug_unwrap { ($v: expr) => { {
17 	debug_assert!(v.is_ok());
20 		Err(e) => return Err(e),
24 // Various const versions of existing slice utilities
25 /// Const version of `&a[start..end]`
26 const fn const_subslice<'a, T>(a: &'a [T], start: usize, end: usize) -> &'a [T] {
27 	assert!(start <= a.len()); // mirror the panicking bounds checks of `&a[start..end]`
28 	assert!(end <= a.len());
29 	assert!(end >= start);
30 	let mut startptr = a.as_ptr();
31 	startptr = unsafe { startptr.add(start) }; // SAFETY: start <= a.len() (asserted above), so the offset is in-bounds or one-past-the-end
32 	let len = end - start;
33 	// The docs for from_raw_parts do not mention any requirements that the pointer be valid if the
34 	// length is zero, aside from requiring proper alignment (which is met here). Thus,
35 	// one-past-the-end should be an acceptable pointer for a 0-length slice.
36 	unsafe { alloc::slice::from_raw_parts(startptr, len) }
39 /// Const version of `dest[dest_start..dest_end].copy_from_slice(source)`
41 /// Once `const_mut_refs` is stable we can convert this to a function
42 macro_rules! copy_from_slice {
43 ($dest: ident, $dest_start: expr, $dest_end: expr, $source: ident) => { {
44 let dest_start = $dest_start;
45 let dest_end = $dest_end;
46 assert!(dest_start <= $dest.len());
47 assert!(dest_end <= $dest.len());
48 assert!(dest_end >= dest_start);
49 assert!(dest_end - dest_start == $source.len());
51 while i < $source.len() {
52 $dest[i + dest_start] = $source[i];
58 /// Const version of a > b
59 const fn slice_greater_than(a: &[u64], b: &[u64]) -> bool {
60 	debug_assert!(a.len() == b.len()); // callers always pass equal-length slices
61 	let len = if a.len() <= b.len() { a.len() } else { b.len() }; // min(a.len(), b.len()); `cmp::min` is not const-callable here
// NOTE(review): the per-limb early returns make this comparison variable-time,
// which is by design for this module (see the crate-level "variable-time" doc).
64 		if a[i] > b[i] { return true; }
65 		else if a[i] < b[i] { return false; }
71 /// Const version of a == b
72 const fn slice_equal(a: &[u64], b: &[u64]) -> bool {
73 	debug_assert!(a.len() == b.len()); // callers always pass equal-length slices
74 	let len = if a.len() <= b.len() { a.len() } else { b.len() }; // min of the two lengths, computed const-compatibly
77 		if a[i] != b[i] { return false; } // early exit on first mismatch (variable-time, by design)
83 /// Adds a single u64 value in-place, returning an overflow flag, in which case one out-of-bounds
84 /// high bit is implicitly included in the result.
86 /// Once `const_mut_refs` is stable we can convert this to a function
87 macro_rules! add_u64 { ($a: ident, $b: expr) => { {
92 let (v, carry) = $a[i].overflowing_add(add);
95 if add == 0 { break; }
103 /// Negates the given u64 slice.
105 /// Once `const_mut_refs` is stable we can convert this to a function
106 macro_rules! negate { ($v: ident) => { {
109 $v[i] ^= 0xffff_ffff_ffff_ffff;
112 let _ = add_u64!($v, 1);
115 /// Doubles in-place, returning an overflow flag, in which case one out-of-bounds high bit is
116 /// implicitly included in the result.
118 /// Once `const_mut_refs` is stable we can convert this to a function
119 macro_rules! double { ($a: ident) => { {
120 { let _: &[u64] = &$a; } // Force type resolution
122 let mut carry = false;
125 let next_carry = ($a[i] & (1 << 63)) != 0;
126 let (v, _next_carry_2) = ($a[i] << 1).overflowing_add(carry as u64);
128 debug_assert!(!_next_carry_2, "Adding one to 0x7ffff..*2 is only 0xffff..");
130 // Note that we can ignore _next_carry_2 here as we never need it - it cannot be set if
131 // next_carry is not set and at max 0xffff..*2 + 1 is only 0x1ffff.. (i.e. we can not need
142 macro_rules! define_add { ($name: ident, $len: expr) => {
143 /// Adds two $len-64-bit integers together, returning a new $len-64-bit integer and an overflow
144 /// bit, with the same semantics as the std [`u64::overflowing_add`] method.
145 const fn $name(a: &[u64], b: &[u64]) -> ([u64; $len], bool) {
146 debug_assert!(a.len() == $len);
147 debug_assert!(b.len() == $len);
148 let mut r = [0; $len];
149 let mut carry = false;
150 let mut i = $len - 1;
152 let (v, mut new_carry) = a[i].overflowing_add(b[i]);
153 let (v2, new_new_carry) = v.overflowing_add(carry as u64);
154 new_carry |= new_new_carry;
165 define_add!(add_2, 2);
166 define_add!(add_3, 3);
167 define_add!(add_4, 4);
168 define_add!(add_6, 6);
169 define_add!(add_8, 8);
170 define_add!(add_12, 12);
171 define_add!(add_16, 16);
172 define_add!(add_32, 32);
173 define_add!(add_64, 64);
174 define_add!(add_128, 128);
176 macro_rules! define_sub { ($name: ident, $name_abs: ident, $len: expr) => {
177 /// Subtracts the `b` $len-64-bit integer from the `a` $len-64-bit integer, returning a new
178 /// $len-64-bit integer and an overflow bit, with the same semantics as the std
179 /// [`u64::overflowing_sub`] method.
180 const fn $name(a: &[u64], b: &[u64]) -> ([u64; $len], bool) {
181 debug_assert!(a.len() == $len);
182 debug_assert!(b.len() == $len);
183 let mut r = [0; $len];
184 let mut carry = false;
185 let mut i = $len - 1;
187 let (v, mut new_carry) = a[i].overflowing_sub(b[i]);
188 let (v2, new_new_carry) = v.overflowing_sub(carry as u64);
189 new_carry |= new_new_carry;
199 /// Subtracts the `b` $len-64-bit integer from the `a` $len-64-bit integer, returning a new
200 /// $len-64-bit integer representing the absolute value of the result, as well as a sign bit.
202 const fn $name_abs(a: &[u64], b: &[u64]) -> ([u64; $len], bool) {
203 let (mut res, neg) = $name(a, b);
211 define_sub!(sub_2, sub_abs_2, 2);
212 define_sub!(sub_3, sub_abs_3, 3);
213 define_sub!(sub_4, sub_abs_4, 4);
214 define_sub!(sub_6, sub_abs_6, 6);
215 define_sub!(sub_8, sub_abs_8, 8);
216 define_sub!(sub_12, sub_abs_12, 12);
217 define_sub!(sub_16, sub_abs_16, 16);
218 define_sub!(sub_32, sub_abs_32, 32);
219 define_sub!(sub_64, sub_abs_64, 64);
220 define_sub!(sub_128, sub_abs_128, 128);
222 /// Multiplies two 128-bit integers together, returning a new 256-bit integer.
224 /// This is the base case for our multiplication, taking advantage of Rust's native 128-bit int
225 /// types to do multiplication (potentially) natively.
226 const fn mul_2(a: &[u64], b: &[u64]) -> [u64; 4] {
227 debug_assert!(a.len() == 2);
228 debug_assert!(b.len() == 2);
230 // Gradeschool multiplication is way faster here.
231 let (a0, a1) = (a[0] as u128, a[1] as u128);
232 let (b0, b1) = (b[0] as u128, b[1] as u128);
236 let (z1, i_carry_a) = z1i.overflowing_add(z1j);
239 add_mul_2_parts(z2, z1, z0, i_carry_a)
242 /// Adds the gradeschool multiplication intermediate parts to a final 256-bit result
243 const fn add_mul_2_parts(z2: u128, z1: u128, z0: u128, i_carry_a: bool) -> [u64; 4] {
244 let z2a = ((z2 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
245 let z1a = ((z1 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
246 let z0a = ((z0 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
247 let z2b = (z2 & 0xffff_ffff_ffff_ffff) as u64;
248 let z1b = (z1 & 0xffff_ffff_ffff_ffff) as u64;
249 let z0b = (z0 & 0xffff_ffff_ffff_ffff) as u64;
253 let (k, j_carry) = z0a.overflowing_add(z1b);
255 let (mut j, i_carry_b) = z1a.overflowing_add(z2b);
257 (j, i_carry_c) = j.overflowing_add(j_carry as u64);
259 let i_carry = i_carry_a as u64 + i_carry_b as u64 + i_carry_c as u64;
260 let (i, must_not_overflow) = z2a.overflowing_add(i_carry);
261 debug_assert!(!must_not_overflow, "Two 2*64 bit numbers, multiplied, will not use more than 4*64 bits");
266 const fn mul_3(a: &[u64], b: &[u64]) -> [u64; 6] {
267 debug_assert!(a.len() == 3);
268 debug_assert!(b.len() == 3);
270 let (a0, a1, a2) = (a[0] as u128, a[1] as u128, a[2] as u128);
271 let (b0, b1, b2) = (b[0] as u128, b[1] as u128, b[2] as u128);
283 let r5 = ((m4 >> 0) & 0xffff_ffff_ffff_ffff) as u64;
285 let r4a = ((m4 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
286 let r4b = ((m3a >> 0) & 0xffff_ffff_ffff_ffff) as u64;
287 let r4c = ((m3b >> 0) & 0xffff_ffff_ffff_ffff) as u64;
289 let r3a = ((m3a >> 64) & 0xffff_ffff_ffff_ffff) as u64;
290 let r3b = ((m3b >> 64) & 0xffff_ffff_ffff_ffff) as u64;
291 let r3c = ((m2a >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
292 let r3d = ((m2b >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
293 let r3e = ((m2c >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
295 let r2a = ((m2a >> 64) & 0xffff_ffff_ffff_ffff) as u64;
296 let r2b = ((m2b >> 64) & 0xffff_ffff_ffff_ffff) as u64;
297 let r2c = ((m2c >> 64) & 0xffff_ffff_ffff_ffff) as u64;
298 let r2d = ((m1a >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
299 let r2e = ((m1b >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
301 let r1a = ((m1a >> 64) & 0xffff_ffff_ffff_ffff) as u64;
302 let r1b = ((m1b >> 64) & 0xffff_ffff_ffff_ffff) as u64;
303 let r1c = ((m0 >> 0 ) & 0xffff_ffff_ffff_ffff) as u64;
305 let r0a = ((m0 >> 64) & 0xffff_ffff_ffff_ffff) as u64;
307 let (r4, r3_ca) = r4a.overflowing_add(r4b);
308 let (r4, r3_cb) = r4.overflowing_add(r4c);
309 let r3_c = r3_ca as u64 + r3_cb as u64;
311 let (r3, r2_ca) = r3a.overflowing_add(r3b);
312 let (r3, r2_cb) = r3.overflowing_add(r3c);
313 let (r3, r2_cc) = r3.overflowing_add(r3d);
314 let (r3, r2_cd) = r3.overflowing_add(r3e);
315 let (r3, r2_ce) = r3.overflowing_add(r3_c);
316 let r2_c = r2_ca as u64 + r2_cb as u64 + r2_cc as u64 + r2_cd as u64 + r2_ce as u64;
318 let (r2, r1_ca) = r2a.overflowing_add(r2b);
319 let (r2, r1_cb) = r2.overflowing_add(r2c);
320 let (r2, r1_cc) = r2.overflowing_add(r2d);
321 let (r2, r1_cd) = r2.overflowing_add(r2e);
322 let (r2, r1_ce) = r2.overflowing_add(r2_c);
323 let r1_c = r1_ca as u64 + r1_cb as u64 + r1_cc as u64 + r1_cd as u64 + r1_ce as u64;
325 let (r1, r0_ca) = r1a.overflowing_add(r1b);
326 let (r1, r0_cb) = r1.overflowing_add(r1c);
327 let (r1, r0_cc) = r1.overflowing_add(r1_c);
328 let r0_c = r0_ca as u64 + r0_cb as u64 + r0_cc as u64;
330 let (r0, must_not_overflow) = r0a.overflowing_add(r0_c);
331 debug_assert!(!must_not_overflow, "Two 3*64 bit numbers, multiplied, will not use more than 6*64 bits");
333 [r0, r1, r2, r3, r4, r5]
336 macro_rules! define_mul { ($name: ident, $len: expr, $submul: ident, $add: ident, $subadd: ident, $sub: ident, $subsub: ident) => {
337 /// Multiplies two $len-64-bit integers together, returning a new $len*2-64-bit integer.
338 const fn $name(a: &[u64], b: &[u64]) -> [u64; $len * 2] {
339 // We could probably get a bit faster doing gradeschool multiplication for some smaller
340 	// sizes, but it's easier to just have one variable-length multiplication, so we do
341 // Karatsuba always here.
342 debug_assert!(a.len() == $len);
343 debug_assert!(b.len() == $len);
345 let a0 = const_subslice(a, 0, $len / 2);
346 let a1 = const_subslice(a, $len / 2, $len);
347 let b0 = const_subslice(b, 0, $len / 2);
348 let b1 = const_subslice(b, $len / 2, $len);
350 let z2 = $submul(a0, b0);
351 let z0 = $submul(a1, b1);
353 let (z1a_max, z1a_min, z1a_sign) =
354 if slice_greater_than(&a1, &a0) { (a1, a0, true) } else { (a0, a1, false) };
355 let (z1b_max, z1b_min, z1b_sign) =
356 if slice_greater_than(&b1, &b0) { (b1, b0, true) } else { (b0, b1, false) };
358 let z1a = $subsub(z1a_max, z1a_min);
359 debug_assert!(!z1a.1, "z1a_max was selected to be greater than z1a_min");
360 let z1b = $subsub(z1b_max, z1b_min);
361 debug_assert!(!z1b.1, "z1b_max was selected to be greater than z1b_min");
362 let z1m_sign = z1a_sign == z1b_sign;
364 let z1m = $submul(&z1a.0, &z1b.0);
365 let z1n = $add(&z0, &z2);
366 let mut z1_carry = z1n.1;
367 let z1 = if z1m_sign {
368 let r = $sub(&z1n.0, &z1m);
369 if r.1 { z1_carry ^= true; }
372 let r = $add(&z1n.0, &z1m);
373 if r.1 { z1_carry = true; }
377 let l = const_subslice(&z0, $len / 2, $len);
378 let (k, j_carry) = $subadd(const_subslice(&z0, 0, $len / 2), const_subslice(&z1, $len / 2, $len));
379 let (mut j, i_carry_a) = $subadd(const_subslice(&z1, 0, $len / 2), const_subslice(&z2, $len / 2, $len));
380 let mut i_carry_b = false;
382 i_carry_b = add_u64!(j, 1);
384 let mut i = [0; $len / 2];
385 let i_source = const_subslice(&z2, 0, $len / 2);
386 copy_from_slice!(i, 0, $len / 2, i_source);
387 let i_carry = i_carry_a as u64 + i_carry_b as u64 + z1_carry as u64;
389 let must_not_overflow = add_u64!(i, i_carry);
390 debug_assert!(!must_not_overflow, "Two N*64 bit numbers, multiplied, will not use more than 2*N*64 bits");
393 let mut res = [0; $len * 2];
394 copy_from_slice!(res, $len * 2 * 0 / 4, $len * 2 * 1 / 4, i);
395 copy_from_slice!(res, $len * 2 * 1 / 4, $len * 2 * 2 / 4, j);
396 copy_from_slice!(res, $len * 2 * 2 / 4, $len * 2 * 3 / 4, k);
397 copy_from_slice!(res, $len * 2 * 3 / 4, $len * 2 * 4 / 4, l);
402 define_mul!(mul_4, 4, mul_2, add_4, add_2, sub_4, sub_2);
403 define_mul!(mul_6, 6, mul_3, add_6, add_3, sub_6, sub_3);
404 define_mul!(mul_8, 8, mul_4, add_8, add_4, sub_8, sub_4);
405 define_mul!(mul_16, 16, mul_8, add_16, add_8, sub_16, sub_8);
406 define_mul!(mul_32, 32, mul_16, add_32, add_16, sub_32, sub_16);
407 define_mul!(mul_64, 64, mul_32, add_64, add_32, sub_64, sub_32);
410 /// Squares a 128-bit integer, returning a new 256-bit integer.
412 /// This is the base case for our squaring, taking advantage of Rust's native 128-bit int
413 /// types to do multiplication (potentially) natively.
414 const fn sqr_2(a: &[u64]) -> [u64; 4] {
415 debug_assert!(a.len() == 2);
417 let (a0, a1) = (a[0] as u128, a[1] as u128);
419 let mut z1 = a0 * a1;
420 let i_carry_a = z1 & (1u128 << 127) != 0;
424 add_mul_2_parts(z2, z1, z0, i_carry_a)
427 macro_rules! define_sqr { ($name: ident, $len: expr, $submul: ident, $subsqr: ident, $subadd: ident) => {
428 /// Squares a $len-64-bit integers, returning a new $len*2-64-bit integer.
429 const fn $name(a: &[u64]) -> [u64; $len * 2] {
430 // Squaring is only 3 half-length multiplies/squares in gradeschool math, so use that.
431 debug_assert!(a.len() == $len);
433 let hi = const_subslice(a, 0, $len / 2);
434 let lo = const_subslice(a, $len / 2, $len);
436 let v0 = $subsqr(lo);
437 let mut v1 = $submul(hi, lo);
438 let i_carry_a = double!(v1);
439 let v2 = $subsqr(hi);
441 let l = const_subslice(&v0, $len / 2, $len);
442 let (k, j_carry) = $subadd(const_subslice(&v0, 0, $len / 2), const_subslice(&v1, $len / 2, $len));
443 let (mut j, i_carry_b) = $subadd(const_subslice(&v1, 0, $len / 2), const_subslice(&v2, $len / 2, $len));
445 let mut i = [0; $len / 2];
446 let i_source = const_subslice(&v2, 0, $len / 2);
447 copy_from_slice!(i, 0, $len / 2, i_source);
449 let mut i_carry_c = false;
451 i_carry_c = add_u64!(j, 1);
453 let i_carry = i_carry_a as u64 + i_carry_b as u64 + i_carry_c as u64;
455 let must_not_overflow = add_u64!(i, i_carry);
456 debug_assert!(!must_not_overflow, "Two N*64 bit numbers, multiplied, will not use more than 2*N*64 bits");
459 let mut res = [0; $len * 2];
460 copy_from_slice!(res, $len * 2 * 0 / 4, $len * 2 * 1 / 4, i);
461 copy_from_slice!(res, $len * 2 * 1 / 4, $len * 2 * 2 / 4, j);
462 copy_from_slice!(res, $len * 2 * 2 / 4, $len * 2 * 3 / 4, k);
463 copy_from_slice!(res, $len * 2 * 3 / 4, $len * 2 * 4 / 4, l);
468 // TODO: Write an optimized sqr_3 (though secp384r1 is barely used)
/// Squares a 3x64-bit integer, returning a new 6x64-bit integer.
469 const fn sqr_3(a: &[u64]) -> [u64; 6] { mul_3(a, a) } // falls back to the full 3x3 multiply rather than exploiting squaring symmetry
471 define_sqr!(sqr_4, 4, mul_2, sqr_2, add_2);
472 define_sqr!(sqr_6, 6, mul_3, sqr_3, add_3);
473 define_sqr!(sqr_8, 8, mul_4, sqr_4, add_4);
474 define_sqr!(sqr_16, 16, mul_8, sqr_8, add_8);
475 define_sqr!(sqr_32, 32, mul_16, sqr_16, add_16);
476 define_sqr!(sqr_64, 64, mul_32, sqr_32, add_32);
// Storage-preparation hooks for `define_div_rem!`: before writing `pow2s[pow2s_count]`,
// array-backed storage needs nothing, while Vec-backed storage must first grow by one
// zeroed entry (which is then overwritten).
478 macro_rules! dummy_pre_push { ($name: ident, $len: expr) => {} }
479 macro_rules! vec_pre_push { ($name: ident, $len: expr) => { $name.push([0; $len]); } }
481 macro_rules! define_div_rem { ($name: ident, $len: expr, $sub: ident, $heap_init: expr, $pre_push: ident $(, $const_opt: tt)?) => {
482 /// Divides two $len-64-bit integers, `a` by `b`, returning the quotient and remainder
484 /// Fails iff `b` is zero.
485 $($const_opt)? fn $name(a: &[u64; $len], b: &[u64; $len]) -> Result<([u64; $len], [u64; $len]), ()> {
486 if slice_equal(b, &[0; $len]) { return Err(()); }
488 // Very naively divide `a` by `b` by calculating all the powers of two times `b` up to `a`,
489 // then subtracting the powers of two in decreasing order. What's left is the remainder.
491 // This requires storing all the multiples of `b` in `pow2s`, which may be a vec or an
492 // array. `$pre_push!()` sets up the next element with zeros and then we can overwrite it.
494 let mut pow2s = $heap_init;
495 let mut pow2s_count = 0;
496 while slice_greater_than(a, &b_pow) {
497 $pre_push!(pow2s, $len);
498 pow2s[pow2s_count] = b_pow;
500 let double_overflow = double!(b_pow);
501 if double_overflow { break; }
503 let mut quot = [0; $len];
505 let mut pow2 = pow2s_count as isize - 1;
507 let b_pow = pow2s[pow2 as usize];
508 let overflow = double!(quot);
509 debug_assert!(!overflow, "quotient should be expressible in $len*64 bits");
510 if slice_greater_than(&rem, &b_pow) {
511 let (r, underflow) = $sub(&rem, &b_pow);
512 debug_assert!(!underflow, "rem was just checked to be > b_pow, so sub cannot underflow");
518 if slice_equal(&rem, b) {
519 let overflow = add_u64!(quot, 1);
520 debug_assert!(!overflow, "quotient should be expressible in $len*64 bits");
521 Ok((quot, [0; $len]))
529 define_div_rem!(div_rem_2, 2, sub_2, [[0; 2]; 2 * 64], dummy_pre_push, const);
530 define_div_rem!(div_rem_4, 4, sub_4, [[0; 4]; 4 * 64], dummy_pre_push, const); // Uses 8 KiB of stack
531 define_div_rem!(div_rem_6, 6, sub_6, [[0; 6]; 6 * 64], dummy_pre_push, const); // Uses 18 KiB of stack!
532 #[cfg(debug_assertions)]
533 define_div_rem!(div_rem_8, 8, sub_8, [[0; 8]; 8 * 64], dummy_pre_push, const); // Uses 32 KiB of stack!
534 #[cfg(debug_assertions)]
535 define_div_rem!(div_rem_12, 12, sub_12, [[0; 12]; 12 * 64], dummy_pre_push, const); // Uses 72 KiB of stack!
536 define_div_rem!(div_rem_64, 64, sub_64, Vec::new(), vec_pre_push); // Uses up to 2 MiB of heap
537 #[cfg(debug_assertions)]
538 define_div_rem!(div_rem_128, 128, sub_128, Vec::new(), vec_pre_push); // Uses up to 8 MiB of heap
540 macro_rules! define_mod_inv { ($name: ident, $len: expr, $div: ident, $add: ident, $sub_abs: ident, $mul: ident) => {
541 /// Calculates the modular inverse of a $len-64-bit number with respect to the given modulus,
543 const fn $name(a: &[u64; $len], m: &[u64; $len]) -> Result<[u64; $len], ()> {
544 if slice_equal(a, &[0; $len]) || slice_equal(m, &[0; $len]) { return Err(()); }
546 let (mut s, mut old_s) = ([0; $len], [0; $len]);
551 let (mut old_s_neg, mut s_neg) = (false, false);
553 while !slice_equal(&r, &[0; $len]) {
554 let (quot, new_r) = debug_unwrap!($div(&old_r, &r));
556 let new_sa = $mul(", &s);
557 debug_assert!(slice_equal(const_subslice(&new_sa, 0, $len), &[0; $len]), "S overflowed");
558 let (new_s, new_s_neg) = match (old_s_neg, s_neg) {
560 let (new_s, overflow) = $add(&old_s, const_subslice(&new_sa, $len, new_sa.len()));
561 debug_assert!(!overflow);
565 let (new_s, overflow) = $add(&old_s, const_subslice(&new_sa, $len, new_sa.len()));
566 debug_assert!(!overflow);
570 let (new_s, overflow) = $add(&old_s, const_subslice(&new_sa, $len, new_sa.len()));
571 debug_assert!(!overflow);
574 (false, false) => $sub_abs(&old_s, const_subslice(&new_sa, $len, new_sa.len())),
586 // At this point old_r contains our GCD and old_s our first Bézout's identity coefficient.
587 if !slice_equal(const_subslice(&old_r, 0, $len - 1), &[0; $len - 1]) || old_r[$len - 1] != 1 {
590 debug_assert!(slice_greater_than(m, &old_s));
592 let (modinv, underflow) = $sub_abs(m, &old_s);
593 debug_assert!(!underflow);
594 debug_assert!(slice_greater_than(m, &modinv));
603 define_mod_inv!(mod_inv_2, 2, div_rem_2, add_2, sub_abs_2, mul_2);
604 define_mod_inv!(mod_inv_4, 4, div_rem_4, add_4, sub_abs_4, mul_4);
605 define_mod_inv!(mod_inv_6, 6, div_rem_6, add_6, sub_abs_6, mul_6);
607 define_mod_inv!(mod_inv_8, 8, div_rem_8, add_8, sub_abs_8, mul_8);
609 // ******************
610 // * The public API *
611 // ******************
613 const WORD_COUNT_4096: usize = 4096 / 64; // 64 64-bit limbs
614 const WORD_COUNT_256: usize = 256 / 64; // 4 64-bit limbs
615 const WORD_COUNT_384: usize = 384 / 64; // 6 64-bit limbs
617 // RFC 5702 indicates RSA keys can be up to 4096 bits, so we always use 4096-bit integers
618 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
619 pub(super) struct U4096([u64; WORD_COUNT_4096]);
/// A 256-bit unsigned integer, stored as four u64 limbs.
621 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
622 pub(super) struct U256([u64; WORD_COUNT_256]);
/// A 384-bit unsigned integer, stored as six u64 limbs.
624 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
625 pub(super) struct U384([u64; WORD_COUNT_384]);
627 pub(super) trait Int: Clone + Ord + Sized {
630 fn from_be_bytes(b: &[u8]) -> Result<Self, ()>;
631 fn limbs(&self) -> &[u64];
634 const ZERO: U256 = U256([0; 4]);
635 const BYTES: usize = 32;
636 fn from_be_bytes(b: &[u8]) -> Result<Self, ()> { Self::from_be_bytes(b) }
637 fn limbs(&self) -> &[u64] { &self.0 }
640 const ZERO: U384 = U384([0; 6]);
641 const BYTES: usize = 48;
642 fn from_be_bytes(b: &[u8]) -> Result<Self, ()> { Self::from_be_bytes(b) }
643 fn limbs(&self) -> &[u64] { &self.0 }
646 /// Defines a *PRIME* Modulus
647 pub(super) trait PrimeModulus<I: Int> {
649 const R_SQUARED_MOD_PRIME: I;
650 const NEGATIVE_PRIME_INV_MOD_R: I;
653 #[derive(Clone, Debug, PartialEq, Eq)] // Ord doesn't make sense cause we have an R factor
654 pub(super) struct U256Mod<M: PrimeModulus<U256>>(U256, PhantomData<M>);
656 #[derive(Clone, Debug, PartialEq, Eq)] // Ord doesn't make sense cause we have an R factor
657 pub(super) struct U384Mod<M: PrimeModulus<U384>>(U384, PhantomData<M>);
660 	/// Constructs a new [`U4096`] from a variable number of big-endian bytes.
661 	pub(super) fn from_be_bytes(bytes: &[u8]) -> Result<U4096, ()> {
662 		if bytes.len() > 4096/8 { return Err(()); } // reject inputs wider than 4096 bits
663 		let u64s = (bytes.len() + 7) / 8; // number of 64-bit limbs needed, rounding up
664 		let mut res = [0; WORD_COUNT_4096];
667 			let pos = (u64s - i) * 8; // byte offset of this limb's end, counted from the end of `bytes`
668 			let start = bytes.len().saturating_sub(pos); // clamps to 0 when the leading limb is partial
669 			let end = bytes.len() + 8 - pos;
670 			b[8 + start - end..].copy_from_slice(&bytes[start..end]); // right-align the chunk so missing leading bytes read as high zero bytes
671 			res[i + WORD_COUNT_4096 - u64s] = u64::from_be_bytes(b); // limbs are stored most-significant first, low-aligned to the array's end
676 /// Naively multiplies `self` * `b` mod `m`, returning a new [`U4096`].
678 /// Fails iff m is 0 or self or b are greater than m.
679 #[cfg(debug_assertions)]
680 fn mulmod_naive(&self, b: &U4096, m: &U4096) -> Result<U4096, ()> {
681 if m.0 == [0; WORD_COUNT_4096] { return Err(()); }
682 if self > m || b > m { return Err(()); }
684 let mul = mul_64(&self.0, &b.0);
686 let mut m_zeros = [0; 128];
687 m_zeros[WORD_COUNT_4096..].copy_from_slice(&m.0);
688 let (_, rem) = div_rem_128(&mul, &m_zeros)?;
689 let mut res = [0; WORD_COUNT_4096];
690 debug_assert_eq!(&rem[..WORD_COUNT_4096], &[0; WORD_COUNT_4096]);
691 res.copy_from_slice(&rem[WORD_COUNT_4096..]);
695 /// Calculates `self` ^ `exp` mod `m`, returning a new [`U4096`].
697 /// Fails iff m is 0, even, or self or b are greater than m.
698 pub(super) fn expmod_odd_mod(&self, mut exp: u32, m: &U4096) -> Result<U4096, ()> {
699 #![allow(non_camel_case_types)]
701 if m.0 == [0; WORD_COUNT_4096] { return Err(()); }
702 if m.0[WORD_COUNT_4096 - 1] & 1 == 0 { return Err(()); }
703 if self > m { return Err(()); }
705 let mut t = [0; WORD_COUNT_4096];
706 if m.0[..WORD_COUNT_4096 - 1] == [0; WORD_COUNT_4096 - 1] && m.0[WORD_COUNT_4096 - 1] == 1 {
709 t[WORD_COUNT_4096 - 1] = 1;
710 if exp == 0 { return Ok(U4096(t)); }
712 // Because m is not even, using 2^4096 as the Montgomery R value is always safe - it is
713 // guaranteed to be co-prime with any non-even integer.
715 // We use a single 4096-bit integer type for all our RSA operations, though in most cases
716 // we're actually dealing with 1024-bit or 2048-bit ints. Thus, we define sub-array math
717 // here which debug_assert's the required bits are 0s and then uses faster math primitives.
719 type mul_ty = fn(&[u64], &[u64]) -> [u64; WORD_COUNT_4096 * 2];
720 type sqr_ty = fn(&[u64]) -> [u64; WORD_COUNT_4096 * 2];
721 type add_double_ty = fn(&[u64], &[u64]) -> ([u64; WORD_COUNT_4096 * 2], bool);
722 type sub_ty = fn(&[u64], &[u64]) -> ([u64; WORD_COUNT_4096], bool);
723 let (word_count, log_bits, mul, sqr, add_double, sub) =
724 if m.0[..WORD_COUNT_4096 / 2] == [0; WORD_COUNT_4096 / 2] {
725 if m.0[..WORD_COUNT_4096 * 3 / 4] == [0; WORD_COUNT_4096 * 3 / 4] {
726 fn mul_16_subarr(a: &[u64], b: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
727 debug_assert_eq!(a.len(), WORD_COUNT_4096);
728 debug_assert_eq!(b.len(), WORD_COUNT_4096);
729 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
730 debug_assert_eq!(&b[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
731 let mut res = [0; WORD_COUNT_4096 * 2];
732 res[WORD_COUNT_4096 + WORD_COUNT_4096 / 2..].copy_from_slice(
733 &mul_16(&a[WORD_COUNT_4096 * 3 / 4..], &b[WORD_COUNT_4096 * 3 / 4..]));
736 fn sqr_16_subarr(a: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
737 debug_assert_eq!(a.len(), WORD_COUNT_4096);
738 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
739 let mut res = [0; WORD_COUNT_4096 * 2];
740 res[WORD_COUNT_4096 + WORD_COUNT_4096 / 2..].copy_from_slice(
741 &sqr_16(&a[WORD_COUNT_4096 * 3 / 4..]));
744 fn add_32_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096 * 2], bool) {
745 debug_assert_eq!(a.len(), WORD_COUNT_4096 * 2);
746 debug_assert_eq!(b.len(), WORD_COUNT_4096 * 2);
747 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 2], &[0; WORD_COUNT_4096 * 3 / 2]);
748 debug_assert_eq!(&b[..WORD_COUNT_4096 * 3 / 2], &[0; WORD_COUNT_4096 * 3 / 2]);
749 let (add, overflow) = add_32(&a[WORD_COUNT_4096 * 3 / 2..], &b[WORD_COUNT_4096 * 3 / 2..]);
750 let mut res = [0; WORD_COUNT_4096 * 2];
751 res[WORD_COUNT_4096 * 3 / 2..].copy_from_slice(&add);
754 fn sub_16_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096], bool) {
755 debug_assert_eq!(a.len(), WORD_COUNT_4096);
756 debug_assert_eq!(b.len(), WORD_COUNT_4096);
757 debug_assert_eq!(&a[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
758 debug_assert_eq!(&b[..WORD_COUNT_4096 * 3 / 4], &[0; WORD_COUNT_4096 * 3 / 4]);
759 let (sub, underflow) = sub_16(&a[WORD_COUNT_4096 * 3 / 4..], &b[WORD_COUNT_4096 * 3 / 4..]);
760 let mut res = [0; WORD_COUNT_4096];
761 res[WORD_COUNT_4096 * 3 / 4..].copy_from_slice(&sub);
764 (16, 10, mul_16_subarr as mul_ty, sqr_16_subarr as sqr_ty, add_32_subarr as add_double_ty, sub_16_subarr as sub_ty)
766 fn mul_32_subarr(a: &[u64], b: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
767 debug_assert_eq!(a.len(), WORD_COUNT_4096);
768 debug_assert_eq!(b.len(), WORD_COUNT_4096);
769 debug_assert_eq!(&a[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
770 debug_assert_eq!(&b[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
771 let mut res = [0; WORD_COUNT_4096 * 2];
772 res[WORD_COUNT_4096..].copy_from_slice(
773 &mul_32(&a[WORD_COUNT_4096 / 2..], &b[WORD_COUNT_4096 / 2..]));
776 fn sqr_32_subarr(a: &[u64]) -> [u64; WORD_COUNT_4096 * 2] {
777 debug_assert_eq!(a.len(), WORD_COUNT_4096);
778 debug_assert_eq!(&a[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
779 let mut res = [0; WORD_COUNT_4096 * 2];
780 res[WORD_COUNT_4096..].copy_from_slice(
781 &sqr_32(&a[WORD_COUNT_4096 / 2..]));
784 fn add_64_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096 * 2], bool) {
785 debug_assert_eq!(a.len(), WORD_COUNT_4096 * 2);
786 debug_assert_eq!(b.len(), WORD_COUNT_4096 * 2);
787 debug_assert_eq!(&a[..WORD_COUNT_4096], &[0; WORD_COUNT_4096]);
788 debug_assert_eq!(&b[..WORD_COUNT_4096], &[0; WORD_COUNT_4096]);
789 let (add, overflow) = add_64(&a[WORD_COUNT_4096..], &b[WORD_COUNT_4096..]);
790 let mut res = [0; WORD_COUNT_4096 * 2];
791 res[WORD_COUNT_4096..].copy_from_slice(&add);
794 fn sub_32_subarr(a: &[u64], b: &[u64]) -> ([u64; WORD_COUNT_4096], bool) {
795 debug_assert_eq!(a.len(), WORD_COUNT_4096);
796 debug_assert_eq!(b.len(), WORD_COUNT_4096);
797 debug_assert_eq!(&a[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
798 debug_assert_eq!(&b[..WORD_COUNT_4096 / 2], &[0; WORD_COUNT_4096 / 2]);
799 let (sub, underflow) = sub_32(&a[WORD_COUNT_4096 / 2..], &b[WORD_COUNT_4096 / 2..]);
800 let mut res = [0; WORD_COUNT_4096];
801 res[WORD_COUNT_4096 / 2..].copy_from_slice(&sub);
804 (32, 11, mul_32_subarr as mul_ty, sqr_32_subarr as sqr_ty, add_64_subarr as add_double_ty, sub_32_subarr as sub_ty)
807 (64, 12, mul_64 as mul_ty, sqr_64 as sqr_ty, add_128 as add_double_ty, sub_64 as sub_ty)
810 // r is always the even value with one bit set above the word count we're using.
811 let mut r = [0; WORD_COUNT_4096 * 2];
812 r[WORD_COUNT_4096 * 2 - word_count - 1] = 1;
814 let mut m_inv_pos = [0; WORD_COUNT_4096];
815 m_inv_pos[WORD_COUNT_4096 - 1] = 1;
816 let mut two = [0; WORD_COUNT_4096];
817 two[WORD_COUNT_4096 - 1] = 2;
818 for _ in 0..log_bits {
819 let mut m_m_inv = mul(&m_inv_pos, &m.0);
820 m_m_inv[..WORD_COUNT_4096 * 2 - word_count].fill(0);
821 let m_inv = mul(&sub(&two, &m_m_inv[WORD_COUNT_4096..]).0, &m_inv_pos);
822 m_inv_pos[WORD_COUNT_4096 - word_count..].copy_from_slice(&m_inv[WORD_COUNT_4096 * 2 - word_count..]);
824 m_inv_pos[..WORD_COUNT_4096 - word_count].fill(0);
826 // `m_inv` is the negative modular inverse of m mod R, so subtract m_inv from R.
827 let mut m_inv = m_inv_pos;
829 m_inv[..WORD_COUNT_4096 - word_count].fill(0);
830 debug_assert_eq!(&mul(&m_inv, &m.0)[WORD_COUNT_4096 * 2 - word_count..],
832 &[0xffff_ffff_ffff_ffff; WORD_COUNT_4096][WORD_COUNT_4096 - word_count..]);
834 let mont_reduction = |mu: [u64; WORD_COUNT_4096 * 2]| -> [u64; WORD_COUNT_4096] {
835 debug_assert_eq!(&mu[..WORD_COUNT_4096 * 2 - word_count * 2],
836 &[0; WORD_COUNT_4096 * 2][..WORD_COUNT_4096 * 2 - word_count * 2]);
837 // Do a montgomery reduction of `mu`
839 // The definition of REDC (with some names changed):
840 // v = ((mu % R) * N') mod R
841 // t = (mu + v*N) / R
842 // if t >= N { t - N } else { t }
844 			// mu % R is just the bottom word_count 64-bit words of mu
845 let mut mu_mod_r = [0; WORD_COUNT_4096];
846 mu_mod_r[WORD_COUNT_4096 - word_count..].copy_from_slice(&mu[WORD_COUNT_4096 * 2 - word_count..]);
848 // v = ((mu % R) * negative_modulus_inverse) % R
849 let mut v = mul(&mu_mod_r, &m_inv);
850 v[..WORD_COUNT_4096 * 2 - word_count].fill(0); // mod R
852 // t_on_r = (mu + v*modulus) / R
853 let t0 = mul(&v[WORD_COUNT_4096..], &m.0);
854 let (t1, t1_extra_bit) = add_double(&t0, &mu);
856 			// Note that dividing t1 by R is simply a matter of shifting right by word_count 64-bit words
857 			// We only need to maintain word_count 64-bit words (plus `t1_extra_bit` which is implicitly
858 			// an extra bit) because t_on_r is guaranteed to be, at max, 2*m - 1.
859 let mut t1_on_r = [0; WORD_COUNT_4096];
860 debug_assert_eq!(&t1[WORD_COUNT_4096 * 2 - word_count..], &[0; WORD_COUNT_4096][WORD_COUNT_4096 - word_count..],
861 "t1 should be divisible by r");
862 t1_on_r[WORD_COUNT_4096 - word_count..].copy_from_slice(&t1[WORD_COUNT_4096 * 2 - word_count * 2..WORD_COUNT_4096 * 2 - word_count]);
864 // The modulus has only word_count bytes, so if t1_extra_bit is set we are definitely
865 // larger than the modulus.
866 if t1_extra_bit || t1_on_r >= m.0 {
868 (t1_on_r, underflow) = sub(&t1_on_r, &m.0);
869 debug_assert_eq!(t1_extra_bit, underflow,
870 "The number (t1_extra_bit, t1_on_r) is at most 2m-1, so underflowing t1_on_r - m should happen iff t1_extra_bit is set.");
875 // Calculate R^2 mod m as ((2^DOUBLES * R) mod m)^(log_bits - LOG2_DOUBLES) mod R
876 let mut r_minus_one = [0xffff_ffff_ffff_ffffu64; WORD_COUNT_4096];
877 r_minus_one[..WORD_COUNT_4096 - word_count].fill(0);
878 // While we do a full div here, in general R should be less than 2x m (assuming the RSA
879 // modulus used its full bit range and is 1024, 2048, or 4096 bits), so it should be cheap.
880 // In cases with a nonstandard RSA modulus we may end up being pretty slow here, but we'll
882 		// If we ever find a problem with this we should reduce R to be tighter on m, as we're
883 // wasting extra bits of calculation if R is too far from m.
884 let (_, mut r_mod_m) = debug_unwrap!(div_rem_64(&r_minus_one, &m.0));
885 let r_mod_m_overflow = add_u64!(r_mod_m, 1);
886 if r_mod_m_overflow || r_mod_m >= m.0 {
887 (r_mod_m, _) = sub_64(&r_mod_m, &m.0);
890 let mut r2_mod_m: [u64; 64] = r_mod_m;
891 const DOUBLES: usize = 32;
892 const LOG2_DOUBLES: usize = 5;
894 for _ in 0..DOUBLES {
895 let overflow = double!(r2_mod_m);
896 if overflow || r2_mod_m > m.0 {
897 (r2_mod_m, _) = sub_64(&r2_mod_m, &m.0);
900 for _ in 0..log_bits - LOG2_DOUBLES {
901 r2_mod_m = mont_reduction(sqr(&r2_mod_m));
903 // Clear excess high bits
904 for (m_limb, r2_limb) in m.0.iter().zip(r2_mod_m.iter_mut()) {
905 let clear_bits = m_limb.leading_zeros();
906 if clear_bits == 0 { break; }
907 *r2_limb &= !(0xffff_ffff_ffff_ffffu64 << (64 - clear_bits));
908 if *m_limb != 0 { break; }
911 debug_assert!(r2_mod_m < m.0);
912 #[cfg(debug_assertions)] {
913 debug_assert_eq!(r2_mod_m, U4096(r_mod_m).mulmod_naive(&U4096(r_mod_m), &m).unwrap().0);
916 // Finally, actually do the exponentiation...
918 // Calculate t * R and a * R as mont multiplications by R^2 mod m
919 // (i.e. t * R^2 / R and 1 * R^2 / R)
920 let mut tr = mont_reduction(mul(&r2_mod_m, &t));
921 let mut ar = mont_reduction(mul(&r2_mod_m, &self.0));
923 #[cfg(debug_assertions)] {
924 // Check that tr/ar match naive multiplication
925 debug_assert_eq!(&tr, &U4096(t).mulmod_naive(&U4096(r_mod_m), &m).unwrap().0);
926 debug_assert_eq!(&ar, &self.mulmod_naive(&U4096(r_mod_m), &m).unwrap().0);
931 tr = mont_reduction(mul(&tr, &ar));
934 ar = mont_reduction(sqr(&ar));
937 ar = mont_reduction(mul(&ar, &tr));
938 let mut resr = [0; WORD_COUNT_4096 * 2];
939 resr[WORD_COUNT_4096..].copy_from_slice(&ar);
940 Ok(U4096(mont_reduction(resr)))
944 // In a const context we can't subslice a slice, so instead we pick the eight bytes we want and
945 // pass them here to build u64s from arrays.
/// Builds a big-endian `u64` out of eight individually-passed bytes (`a` is the most
/// significant byte, `h` the least).
const fn eight_bytes_to_u64_be(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> u64 {
	u64::from_be_bytes([a, b, c, d, e, f, g, h])
}
/// Constructs a new [`U256`] from a variable number of big-endian bytes.
pub(super) fn from_be_bytes(bytes: &[u8]) -> Result<U256, ()> {
	// Reject anything wider than 256 bits.
	if bytes.len() > 256/8 { return Err(()); }
	// Number of 64-bit limbs needed to hold `bytes`.
	let u64s = (bytes.len() + 7) / 8;
	let mut res = [0; WORD_COUNT_256];
	// NOTE(review): the per-limb loop header (defining the index `i` and the 8-byte scratch
	// buffer `b`) is not visible in this excerpt; the lines below are its body.
	// Distance from the end of `bytes` at which limb `i`'s 8-byte window begins.
	let pos = (u64s - i) * 8;
	// The most-significant limb may be partial; clamp its start to the front of `bytes`.
	let start = bytes.len().saturating_sub(pos);
	let end = bytes.len() + 8 - pos;
	// Right-align the (possibly short) byte window within the scratch buffer.
	b[8 + start - end..].copy_from_slice(&bytes[start..end]);
	// Limbs are stored most-significant first, right-aligned within `res`.
	res[i + WORD_COUNT_256 - u64s] = u64::from_be_bytes(b);
/// Constructs a new [`U256`] from a fixed number of big-endian bytes.
pub(super) const fn from_32_be_bytes_panicking(bytes: &[u8; 32]) -> U256 {
	// `const fn` cannot subslice, so each limb is assembled byte-by-byte via
	// `eight_bytes_to_u64_be`.
	eight_bytes_to_u64_be(bytes[0*8 + 0], bytes[0*8 + 1], bytes[0*8 + 2], bytes[0*8 + 3],
		bytes[0*8 + 4], bytes[0*8 + 5], bytes[0*8 + 6], bytes[0*8 + 7]),
	eight_bytes_to_u64_be(bytes[1*8 + 0], bytes[1*8 + 1], bytes[1*8 + 2], bytes[1*8 + 3],
		bytes[1*8 + 4], bytes[1*8 + 5], bytes[1*8 + 6], bytes[1*8 + 7]),
	eight_bytes_to_u64_be(bytes[2*8 + 0], bytes[2*8 + 1], bytes[2*8 + 2], bytes[2*8 + 3],
		bytes[2*8 + 4], bytes[2*8 + 5], bytes[2*8 + 6], bytes[2*8 + 7]),
	eight_bytes_to_u64_be(bytes[3*8 + 0], bytes[3*8 + 1], bytes[3*8 + 2], bytes[3*8 + 3],
		bytes[3*8 + 4], bytes[3*8 + 5], bytes[3*8 + 6], bytes[3*8 + 7]),
// Small constants. Limbs are most-significant first, so the value sits in the last limb.
pub(super) const fn zero() -> U256 { U256([0, 0, 0, 0]) }
pub(super) const fn one() -> U256 { U256([0, 0, 0, 1]) }
pub(super) const fn three() -> U256 { U256([0, 0, 0, 3]) }
// Values modulo M::PRIME.0, stored in Montgomery form.
989 impl<M: PrimeModulus<U256>> U256Mod<M> {
/// Montgomery-reduces the double-width (8-limb) product `mu`, returning `mu / R mod PRIME`
/// (standard REDC), where R = 2^256.
const fn mont_reduction(mu: [u64; 8]) -> Self {
	#[cfg(debug_assertions)] {
		// Check NEGATIVE_PRIME_INV_MOD_R is correct. Since this is all const, the compiler
		// should be able to do it at compile time alone.
		let minus_one_mod_r = mul_4(&M::PRIME.0, &M::NEGATIVE_PRIME_INV_MOD_R.0);
		assert!(slice_equal(const_subslice(&minus_one_mod_r, 4, 8), &[0xffff_ffff_ffff_ffff; 4]));
	#[cfg(debug_assertions)] {
		// Check R_SQUARED_MOD_PRIME is correct. Since this is all const, the compiler
		// should be able to do it at compile time alone.
		let r_minus_one = [0xffff_ffff_ffff_ffff; 4];
		let (mut r_mod_prime, _) = sub_4(&r_minus_one, &M::PRIME.0);
		let r_mod_prime_overflow = add_u64!(r_mod_prime, 1);
		assert!(!r_mod_prime_overflow);
		let r_squared = sqr_4(&r_mod_prime);
		let mut prime_extended = [0; 8];
		let prime = M::PRIME.0;
		copy_from_slice!(prime_extended, 4, 8, prime);
		let (_, r_squared_mod_prime) = if let Ok(v) = div_rem_8(&r_squared, &prime_extended) { v } else { panic!() };
		assert!(slice_greater_than(&prime_extended, &r_squared_mod_prime));
		assert!(slice_equal(const_subslice(&r_squared_mod_prime, 4, 8), &M::R_SQUARED_MOD_PRIME.0));
	// The definition of REDC (with some names changed):
	// v = ((mu % R) * N') mod R
	// t = (mu + v*N) / R
	// if t >= N { t - N } else { t }
	// mu % R is just the bottom 4 limbs (64-bit words) of mu
	let mu_mod_r = const_subslice(&mu, 4, 8);
	// v = ((mu % R) * negative_modulus_inverse) % R
	let mut v = mul_4(&mu_mod_r, &M::NEGATIVE_PRIME_INV_MOD_R.0);
	const ZEROS: &[u64; 4] = &[0; 4];
	copy_from_slice!(v, 0, 4, ZEROS); // mod R
	// t_on_r = (mu + v*modulus) / R
	let t0 = mul_4(const_subslice(&v, 4, 8), &M::PRIME.0);
	let (t1, t1_extra_bit) = add_8(&t0, &mu);
	// Note that dividing t1 by R is simply a matter of shifting right by 4 limbs.
	// We only need to maintain 4 limbs (plus `t1_extra_bit` which is implicitly an extra bit)
	// because t_on_r is guaranteed to be, at max, 2*m - 1.
	let t1_on_r = const_subslice(&t1, 0, 4);
	let mut res = [0; 4];
	// The modulus is only 4 limbs, so t1_extra_bit implies we're definitely larger than the
	if t1_extra_bit || slice_greater_than(&t1_on_r, &M::PRIME.0) {
		(res, underflow) = sub_4(&t1_on_r, &M::PRIME.0);
		debug_assert!(t1_extra_bit == underflow,
			"The number (t1_extra_bit, t1_on_r) is at most 2m-1, so underflowing t1_on_r - m should happen iff t1_extra_bit is set.");
	copy_from_slice!(res, 0, 4, t1_on_r);
	Self(U256(res), PhantomData)
/// Converts `v` into Montgomery form, `panic!()`ing unless `v < M::PRIME` and the prime is
/// non-zero (usable in const contexts).
pub(super) const fn from_u256_panicking(v: U256) -> Self {
	// Limb-by-limb lexicographic check that `v < M::PRIME` (limbs are most-significant
	// first); written out manually since const fns can't use `PartialOrd`.
	assert!(v.0[0] <= M::PRIME.0[0]);
	if v.0[0] == M::PRIME.0[0] {
		assert!(v.0[1] <= M::PRIME.0[1]);
		if v.0[1] == M::PRIME.0[1] {
			assert!(v.0[2] <= M::PRIME.0[2]);
			if v.0[2] == M::PRIME.0[2] {
				assert!(v.0[3] < M::PRIME.0[3]);
	// The modulus must not be zero.
	assert!(M::PRIME.0[0] != 0 || M::PRIME.0[1] != 0 || M::PRIME.0[2] != 0 || M::PRIME.0[3] != 0);
	// Convert into Montgomery form: (v * R^2) / R == v * R mod PRIME.
	Self::mont_reduction(mul_4(&M::R_SQUARED_MOD_PRIME.0, &v.0))

/// Converts `v` into Montgomery form, reducing it mod `M::PRIME` first if needed.
pub(super) fn from_u256(mut v: U256) -> Self {
	debug_assert!(M::PRIME.0 != [0; 4]);
	debug_assert!(M::PRIME.0[0] > (1 << 63), "PRIME should have the top bit set");
	// With PRIME's top bit set, any 256-bit value is < 2*PRIME, so this loops at most once.
	while v >= M::PRIME {
		let (new_v, spurious_underflow) = sub_4(&v.0, &M::PRIME.0);
		debug_assert!(!spurious_underflow, "v was > M::PRIME.0");
	Self::mont_reduction(mul_4(&M::R_SQUARED_MOD_PRIME.0, &v.0))
1075 pub(super) fn from_modinv_of(v: U256) -> Result<Self, ()> {
1076 Ok(Self::from_u256(U256(mod_inv_4(&v.0, &M::PRIME.0)?)))
1079 /// Multiplies `self` * `b` mod `m`.
1081 /// Panics if `self`'s modulus is not equal to `b`'s
1082 pub(super) fn mul(&self, b: &Self) -> Self {
1083 Self::mont_reduction(mul_4(&self.0.0, &b.0.0))
/// Doubles `self` mod `m`.
pub(super) fn double(&self) -> Self {
	let mut res = self.0.0;
	let overflow = double!(res);
	// Doubling a value below the modulus yields at most 2m - 2, so a single conditional
	// subtraction (when we overflowed 2^256 or reached >= m) fully reduces it.
	if overflow || !slice_greater_than(&M::PRIME.0, &res) {
		(res, underflow) = sub_4(&res, &M::PRIME.0);
		// The subtraction underflows exactly when the doubling overflowed.
		debug_assert_eq!(overflow, underflow);
	Self(U256(res), PhantomData)
1098 /// Multiplies `self` by 3 mod `m`.
1099 pub(super) fn times_three(&self) -> Self {
1100 // TODO: Optimize this a lot
1101 self.mul(&U256Mod::from_u256(U256::three()))
1104 /// Multiplies `self` by 4 mod `m`.
1105 pub(super) fn times_four(&self) -> Self {
1106 // TODO: Optimize this somewhat?
1107 self.double().double()
1110 /// Multiplies `self` by 8 mod `m`.
1111 pub(super) fn times_eight(&self) -> Self {
1112 // TODO: Optimize this somewhat?
1113 self.double().double().double()
/// Squares `self` mod `m`. (The previous doc comment, "Multiplies `self` by 8", was a
/// copy-paste error from `times_eight` — the body squares and Montgomery-reduces.)
pub(super) fn square(&self) -> Self {
	Self::mont_reduction(sqr_4(&self.0.0))
/// Subtracts `b` from `self` % `m`.
pub(super) fn sub(&self, b: &Self) -> Self {
	let (mut val, underflow) = sub_4(&self.0.0, &b.0.0);
	// On underflow, add the modulus back to wrap the result into [0, m).
	(val, overflow) = add_4(&val, &M::PRIME.0);
	// The add overflows exactly when the subtraction underflowed.
	debug_assert_eq!(overflow, underflow);
	Self(U256(val), PhantomData)

/// Adds `b` to `self` % `m`.
pub(super) fn add(&self, b: &Self) -> Self {
	let (mut val, overflow) = add_4(&self.0.0, &b.0.0);
	// The sum of two values < m is at most 2m - 2, so one conditional subtraction (when we
	// overflowed 2^256 or reached >= m) fully reduces it.
	if overflow || !slice_greater_than(&M::PRIME.0, &val) {
		(val, underflow) = sub_4(&val, &M::PRIME.0);
		debug_assert_eq!(overflow, underflow);
	Self(U256(val), PhantomData)
1143 /// Returns the underlying [`U256`].
1144 pub(super) fn into_u256(self) -> U256 {
1145 let mut expanded_self = [0; 8];
1146 expanded_self[4..].copy_from_slice(&self.0.0);
1147 Self::mont_reduction(expanded_self).0
// Values modulo M::PRIME.0, stored in Montgomery form.
/// Constructs a new [`U384`] from a variable number of big-endian bytes.
pub(super) fn from_be_bytes(bytes: &[u8]) -> Result<U384, ()> {
	// Reject anything wider than 384 bits.
	if bytes.len() > 384/8 { return Err(()); }
	// Number of 64-bit limbs needed to hold `bytes`.
	let u64s = (bytes.len() + 7) / 8;
	let mut res = [0; WORD_COUNT_384];
	// NOTE(review): the per-limb loop header (defining the index `i` and the 8-byte scratch
	// buffer `b`) is not visible in this excerpt; the lines below are its body.
	// Distance from the end of `bytes` at which limb `i`'s 8-byte window begins.
	let pos = (u64s - i) * 8;
	// The most-significant limb may be partial; clamp its start to the front of `bytes`.
	let start = bytes.len().saturating_sub(pos);
	let end = bytes.len() + 8 - pos;
	// Right-align the (possibly short) byte window within the scratch buffer.
	b[8 + start - end..].copy_from_slice(&bytes[start..end]);
	// Limbs are stored most-significant first, right-aligned within `res`.
	res[i + WORD_COUNT_384 - u64s] = u64::from_be_bytes(b);
/// Constructs a new [`U384`] from a fixed number of big-endian bytes.
pub(super) const fn from_48_be_bytes_panicking(bytes: &[u8; 48]) -> U384 {
	// `const fn` cannot subslice, so each limb is assembled byte-by-byte via
	// `eight_bytes_to_u64_be`.
	eight_bytes_to_u64_be(bytes[0*8 + 0], bytes[0*8 + 1], bytes[0*8 + 2], bytes[0*8 + 3],
		bytes[0*8 + 4], bytes[0*8 + 5], bytes[0*8 + 6], bytes[0*8 + 7]),
	eight_bytes_to_u64_be(bytes[1*8 + 0], bytes[1*8 + 1], bytes[1*8 + 2], bytes[1*8 + 3],
		bytes[1*8 + 4], bytes[1*8 + 5], bytes[1*8 + 6], bytes[1*8 + 7]),
	eight_bytes_to_u64_be(bytes[2*8 + 0], bytes[2*8 + 1], bytes[2*8 + 2], bytes[2*8 + 3],
		bytes[2*8 + 4], bytes[2*8 + 5], bytes[2*8 + 6], bytes[2*8 + 7]),
	eight_bytes_to_u64_be(bytes[3*8 + 0], bytes[3*8 + 1], bytes[3*8 + 2], bytes[3*8 + 3],
		bytes[3*8 + 4], bytes[3*8 + 5], bytes[3*8 + 6], bytes[3*8 + 7]),
	eight_bytes_to_u64_be(bytes[4*8 + 0], bytes[4*8 + 1], bytes[4*8 + 2], bytes[4*8 + 3],
		bytes[4*8 + 4], bytes[4*8 + 5], bytes[4*8 + 6], bytes[4*8 + 7]),
	eight_bytes_to_u64_be(bytes[5*8 + 0], bytes[5*8 + 1], bytes[5*8 + 2], bytes[5*8 + 3],
		bytes[5*8 + 4], bytes[5*8 + 5], bytes[5*8 + 6], bytes[5*8 + 7]),
// Small constants. Limbs are most-significant first, so the value sits in the last limb.
pub(super) const fn zero() -> U384 { U384([0, 0, 0, 0, 0, 0]) }
pub(super) const fn one() -> U384 { U384([0, 0, 0, 0, 0, 1]) }
pub(super) const fn three() -> U384 { U384([0, 0, 0, 0, 0, 3]) }
1193 impl<M: PrimeModulus<U384>> U384Mod<M> {
/// Montgomery-reduces the double-width (12-limb) product `mu`, returning `mu / R mod PRIME`
/// (standard REDC), where R = 2^384.
const fn mont_reduction(mu: [u64; 12]) -> Self {
	#[cfg(debug_assertions)] {
		// Check NEGATIVE_PRIME_INV_MOD_R is correct. Since this is all const, the compiler
		// should be able to do it at compile time alone.
		let minus_one_mod_r = mul_6(&M::PRIME.0, &M::NEGATIVE_PRIME_INV_MOD_R.0);
		assert!(slice_equal(const_subslice(&minus_one_mod_r, 6, 12), &[0xffff_ffff_ffff_ffff; 6]));
	#[cfg(debug_assertions)] {
		// Check R_SQUARED_MOD_PRIME is correct. Since this is all const, the compiler
		// should be able to do it at compile time alone.
		let r_minus_one = [0xffff_ffff_ffff_ffff; 6];
		let (mut r_mod_prime, _) = sub_6(&r_minus_one, &M::PRIME.0);
		let r_mod_prime_overflow = add_u64!(r_mod_prime, 1);
		assert!(!r_mod_prime_overflow);
		let r_squared = sqr_6(&r_mod_prime);
		let mut prime_extended = [0; 12];
		let prime = M::PRIME.0;
		copy_from_slice!(prime_extended, 6, 12, prime);
		let (_, r_squared_mod_prime) = if let Ok(v) = div_rem_12(&r_squared, &prime_extended) { v } else { panic!() };
		assert!(slice_greater_than(&prime_extended, &r_squared_mod_prime));
		assert!(slice_equal(const_subslice(&r_squared_mod_prime, 6, 12), &M::R_SQUARED_MOD_PRIME.0));
	// The definition of REDC (with some names changed):
	// v = ((mu % R) * N') mod R
	// t = (mu + v*N) / R
	// if t >= N { t - N } else { t }
	// mu % R is just the bottom 6 limbs (64-bit words) of mu
	let mu_mod_r = const_subslice(&mu, 6, 12);
	// v = ((mu % R) * negative_modulus_inverse) % R
	let mut v = mul_6(&mu_mod_r, &M::NEGATIVE_PRIME_INV_MOD_R.0);
	const ZEROS: &[u64; 6] = &[0; 6];
	copy_from_slice!(v, 0, 6, ZEROS); // mod R
	// t_on_r = (mu + v*modulus) / R
	let t0 = mul_6(const_subslice(&v, 6, 12), &M::PRIME.0);
	let (t1, t1_extra_bit) = add_12(&t0, &mu);
	// Note that dividing t1 by R is simply a matter of shifting right by 6 limbs.
	// We only need to maintain 6 limbs (plus `t1_extra_bit` which is implicitly an extra bit)
	// because t_on_r is guaranteed to be, at max, 2*m - 1.
	let t1_on_r = const_subslice(&t1, 0, 6);
	let mut res = [0; 6];
	// The modulus is only 6 limbs, so t1_extra_bit implies we're definitely larger than the
	if t1_extra_bit || slice_greater_than(&t1_on_r, &M::PRIME.0) {
		(res, underflow) = sub_6(&t1_on_r, &M::PRIME.0);
		// The subtraction underflows exactly when the extra bit is set (t is at most 2m-1).
		debug_assert!(t1_extra_bit == underflow);
	copy_from_slice!(res, 0, 6, t1_on_r);
	Self(U384(res), PhantomData)
/// Converts `v` into Montgomery form, `panic!()`ing unless `v < M::PRIME` and the prime is
/// non-zero (usable in const contexts).
pub(super) const fn from_u384_panicking(v: U384) -> Self {
	// Limb-by-limb lexicographic check that `v < M::PRIME` (limbs are most-significant
	// first); written out manually since const fns can't use `PartialOrd`.
	assert!(v.0[0] <= M::PRIME.0[0]);
	if v.0[0] == M::PRIME.0[0] {
		assert!(v.0[1] <= M::PRIME.0[1]);
		if v.0[1] == M::PRIME.0[1] {
			assert!(v.0[2] <= M::PRIME.0[2]);
			if v.0[2] == M::PRIME.0[2] {
				assert!(v.0[3] <= M::PRIME.0[3]);
				if v.0[3] == M::PRIME.0[3] {
					assert!(v.0[4] <= M::PRIME.0[4]);
					if v.0[4] == M::PRIME.0[4] {
						assert!(v.0[5] < M::PRIME.0[5]);
	// The modulus must not be zero.
	assert!(M::PRIME.0[0] != 0 || M::PRIME.0[1] != 0 || M::PRIME.0[2] != 0
		|| M::PRIME.0[3] != 0|| M::PRIME.0[4] != 0|| M::PRIME.0[5] != 0);
	// Convert into Montgomery form: (v * R^2) / R == v * R mod PRIME.
	Self::mont_reduction(mul_6(&M::R_SQUARED_MOD_PRIME.0, &v.0))

/// Converts `v` into Montgomery form, reducing it mod `M::PRIME` first if needed.
pub(super) fn from_u384(mut v: U384) -> Self {
	debug_assert!(M::PRIME.0 != [0; 6]);
	debug_assert!(M::PRIME.0[0] > (1 << 63), "PRIME should have the top bit set");
	// With PRIME's top bit set, any 384-bit value is < 2*PRIME, so this loops at most once.
	while v >= M::PRIME {
		let (new_v, spurious_underflow) = sub_6(&v.0, &M::PRIME.0);
		debug_assert!(!spurious_underflow);
	Self::mont_reduction(mul_6(&M::R_SQUARED_MOD_PRIME.0, &v.0))
1285 pub(super) fn from_modinv_of(v: U384) -> Result<Self, ()> {
1286 Ok(Self::from_u384(U384(mod_inv_6(&v.0, &M::PRIME.0)?)))
1289 /// Multiplies `self` * `b` mod `m`.
1291 /// Panics if `self`'s modulus is not equal to `b`'s
1292 pub(super) fn mul(&self, b: &Self) -> Self {
1293 Self::mont_reduction(mul_6(&self.0.0, &b.0.0))
/// Doubles `self` mod `m`.
pub(super) fn double(&self) -> Self {
	let mut res = self.0.0;
	let overflow = double!(res);
	// Doubling a value below the modulus yields at most 2m - 2, so a single conditional
	// subtraction (when we overflowed 2^384 or reached >= m) fully reduces it.
	if overflow || !slice_greater_than(&M::PRIME.0, &res) {
		(res, underflow) = sub_6(&res, &M::PRIME.0);
		// The subtraction underflows exactly when the doubling overflowed.
		debug_assert_eq!(overflow, underflow);
	Self(U384(res), PhantomData)
1308 /// Multiplies `self` by 3 mod `m`.
1309 pub(super) fn times_three(&self) -> Self {
1310 // TODO: Optimize this a lot
1311 self.mul(&U384Mod::from_u384(U384::three()))
1314 /// Multiplies `self` by 4 mod `m`.
1315 pub(super) fn times_four(&self) -> Self {
1316 // TODO: Optimize this somewhat?
1317 self.double().double()
1320 /// Multiplies `self` by 8 mod `m`.
1321 pub(super) fn times_eight(&self) -> Self {
1322 // TODO: Optimize this somewhat?
1323 self.double().double().double()
/// Squares `self` mod `m`. (The previous doc comment, "Multiplies `self` by 8", was a
/// copy-paste error from `times_eight` — the body squares and Montgomery-reduces.)
pub(super) fn square(&self) -> Self {
	Self::mont_reduction(sqr_6(&self.0.0))
/// Subtracts `b` from `self` % `m`.
pub(super) fn sub(&self, b: &Self) -> Self {
	let (mut val, underflow) = sub_6(&self.0.0, &b.0.0);
	// On underflow, add the modulus back to wrap the result into [0, m).
	(val, overflow) = add_6(&val, &M::PRIME.0);
	// The add overflows exactly when the subtraction underflowed.
	debug_assert_eq!(overflow, underflow);
	Self(U384(val), PhantomData)

/// Adds `b` to `self` % `m`.
pub(super) fn add(&self, b: &Self) -> Self {
	let (mut val, overflow) = add_6(&self.0.0, &b.0.0);
	// The sum of two values < m is at most 2m - 2, so one conditional subtraction (when we
	// overflowed 2^384 or reached >= m) fully reduces it.
	if overflow || !slice_greater_than(&M::PRIME.0, &val) {
		(val, underflow) = sub_6(&val, &M::PRIME.0);
		debug_assert_eq!(overflow, underflow);
	Self(U384(val), PhantomData)
1353 /// Returns the underlying [`U384`].
1354 pub(super) fn into_u384(self) -> U384 {
1355 let mut expanded_self = [0; 12];
1356 expanded_self[6..].copy_from_slice(&self.0.0);
1357 Self::mont_reduction(expanded_self).0
// Montgomery constants for the NIST P-256 and P-384 base-field primes (R = 2^256 and 2^384
// respectively), for use with [`U256Mod`]/[`U384Mod`].
impl PrimeModulus<U256> for P256 {
	/// The NIST P-256 base field prime.
	const PRIME: U256 = U256::from_32_be_bytes_panicking(&hex_lit::hex!(
		"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff"));
	/// R^2 mod PRIME, used to convert values into Montgomery form.
	const R_SQUARED_MOD_PRIME: U256 = U256::from_32_be_bytes_panicking(&hex_lit::hex!(
		"00000004fffffffdfffffffffffffffefffffffbffffffff0000000000000003"));
	/// -PRIME^-1 mod R, the REDC multiplier.
	const NEGATIVE_PRIME_INV_MOD_R: U256 = U256::from_32_be_bytes_panicking(&hex_lit::hex!(
		"ffffffff00000002000000000000000000000001000000000000000000000001"));

impl PrimeModulus<U384> for P384 {
	/// The NIST P-384 base field prime.
	const PRIME: U384 = U384::from_48_be_bytes_panicking(&hex_lit::hex!(
		"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff"));
	/// R^2 mod PRIME, used to convert values into Montgomery form.
	const R_SQUARED_MOD_PRIME: U384 = U384::from_48_be_bytes_panicking(&hex_lit::hex!(
		"000000000000000000000000000000010000000200000000fffffffe000000000000000200000000fffffffe00000001"));
	/// -PRIME^-1 mod R, the REDC multiplier.
	const NEGATIVE_PRIME_INV_MOD_R: U384 = U384::from_48_be_bytes_panicking(&hex_lit::hex!(
		"00000014000000140000000c00000002fffffffcfffffffafffffffbfffffffe00000000000000010000000100000001"));
/// Read some bytes and use them to test bigint math by comparing results against the `ibig` crate.
pub fn fuzz_math(input: &[u8]) {
	// Require at least two 16-byte operands, made of whole 8-byte limbs.
	if input.len() < 32 || input.len() % 16 != 0 { return; }
	// Split the input into two equal-length operands `a` and `b`, capped at 512 bytes each.
	let split = core::cmp::min(input.len() / 2, 512);
	let (a, b) = input.split_at(core::cmp::min(input.len() / 2, 512));
	let b = &b[..split];

	// Reference values computed with `ibig`.
	let ai = ibig::UBig::from_be_bytes(&a);
	let bi = ibig::UBig::from_be_bytes(&b);

	// Decode the operands into big-endian u64 limbs for our own math.
	// NOTE(review): several `for` loop headers (e.g. around the `extend_from_slice` calls
	// below) are elided in this excerpt.
	let mut a_u64s = Vec::with_capacity(split / 8);
	for chunk in a.chunks(8) {
		a_u64s.push(u64::from_be_bytes(chunk.try_into().unwrap()));
	let mut b_u64s = Vec::with_capacity(split / 8);
	for chunk in b.chunks(8) {
		b_u64s.push(u64::from_be_bytes(chunk.try_into().unwrap()));

	// Checks the width-specific primitives ($mul, $sqr, ...) against ibig.
	macro_rules! test { ($mul: ident, $sqr: ident, $add: ident, $sub: ident, $div_rem: ident, $mod_inv: ident) => {
		// a * b
		let res = $mul(&a_u64s, &b_u64s);
		let mut res_bytes = Vec::with_capacity(input.len() / 2);
		res_bytes.extend_from_slice(&i.to_be_bytes());
		assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), ai.clone() * bi.clone());

		// Squaring must agree with multiplying by self.
		debug_assert_eq!($mul(&a_u64s, &a_u64s), $sqr(&a_u64s));
		debug_assert_eq!($mul(&b_u64s, &b_u64s), $sqr(&b_u64s));

		// a + b; the carry is prepended as an extra most-significant byte.
		let (res, carry) = $add(&a_u64s, &b_u64s);
		let mut res_bytes = Vec::with_capacity(input.len() / 2 + 1);
		if carry { res_bytes.push(1); } else { res_bytes.push(0); }
		res_bytes.extend_from_slice(&i.to_be_bytes());
		assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), ai.clone() + bi.clone());

		// a + 1 via the add_u64! macro.
		let mut add_u64s = a_u64s.clone();
		let carry = add_u64!(add_u64s, 1);
		let mut res_bytes = Vec::with_capacity(input.len() / 2 + 1);
		if carry { res_bytes.push(1); } else { res_bytes.push(0); }
		for i in &add_u64s {
			res_bytes.extend_from_slice(&i.to_be_bytes());
		assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), ai.clone() + 1);

		// b * 2 via the double! macro.
		let mut double_u64s = b_u64s.clone();
		let carry = double!(double_u64s);
		let mut res_bytes = Vec::with_capacity(input.len() / 2 + 1);
		if carry { res_bytes.push(1); } else { res_bytes.push(0); }
		for i in &double_u64s {
			res_bytes.extend_from_slice(&i.to_be_bytes());
		assert_eq!(ibig::UBig::from_be_bytes(&res_bytes), bi.clone() * 2);

		// a / b and a % b.
		let (quot, rem) = if let Ok(res) =
			$div_rem(&a_u64s[..].try_into().unwrap(), &b_u64s[..].try_into().unwrap()) {
		let mut quot_bytes = Vec::with_capacity(input.len() / 2);
			quot_bytes.extend_from_slice(&i.to_be_bytes());
		let mut rem_bytes = Vec::with_capacity(input.len() / 2);
			rem_bytes.extend_from_slice(&i.to_be_bytes());
		let (quoti, remi) = ibig::ops::DivRem::div_rem(ai.clone(), &bi);
		// NOTE(review): the first argument on the next line appears garbled in this copy
		// (likely an entity-mangling of `&quot_bytes`) — confirm against the upstream source.
		assert_eq!(ibig::UBig::from_be_bytes("_bytes), quoti);
		assert_eq!(ibig::UBig::from_be_bytes(&rem_bytes), remi);

		// a^-1 mod b, checked against ibig's ModuloRing.
		if ai != ibig::UBig::from(0u32) { // ibig provides a spurious modular inverse for 0
			let ring = ibig::modular::ModuloRing::new(&bi);
			let ar = ring.from(ai.clone());
			let invi = ar.inverse().map(|i| i.residue());
			if let Ok(modinv) = $mod_inv(&a_u64s[..].try_into().unwrap(), &b_u64s[..].try_into().unwrap()) {
				let mut modinv_bytes = Vec::with_capacity(input.len() / 2);
				modinv_bytes.extend_from_slice(&i.to_be_bytes());
				assert_eq!(invi.unwrap(), ibig::UBig::from_be_bytes(&modinv_bytes));
				assert!(invi.is_none());

	// Checks the U256Mod/U384Mod Montgomery wrappers against plain div_rem-based modular math.
	macro_rules! test_mod { ($amodp: expr, $bmodp: expr, $PRIME: expr, $len: expr, $into: ident, $div_rem_double: ident, $div_rem: ident, $mul: ident, $add: ident, $sub: ident) => {
		// Test the U256/U384Mod wrapper, which operates in Montgomery representation
		let mut p_extended = [0; $len * 2];
		p_extended[$len..].copy_from_slice(&$PRIME);

		// a^2 mod p
		let amodp_squared = $div_rem_double(&$mul(&a_u64s, &a_u64s), &p_extended).unwrap().1;
		assert_eq!(&amodp_squared[..$len], &[0; $len]);
		assert_eq!(&$amodp.square().$into().0, &amodp_squared[$len..]);

		// a * b mod p
		let abmodp = $div_rem_double(&$mul(&a_u64s, &b_u64s), &p_extended).unwrap().1;
		assert_eq!(&abmodp[..$len], &[0; $len]);
		assert_eq!(&$amodp.mul(&$bmodp).$into().0, &abmodp[$len..]);

		// (a + b) mod p; the add carry becomes the low bit of the upper half.
		let (aplusb, aplusb_overflow) = $add(&a_u64s, &b_u64s);
		let mut aplusb_extended = [0; $len * 2];
		aplusb_extended[$len..].copy_from_slice(&aplusb);
		if aplusb_overflow { aplusb_extended[$len - 1] = 1; }
		let aplusbmodp = $div_rem_double(&aplusb_extended, &p_extended).unwrap().1;
		assert_eq!(&aplusbmodp[..$len], &[0; $len]);
		assert_eq!(&$amodp.add(&$bmodp).$into().0, &aplusbmodp[$len..]);

		// (a - b) mod p, adding p back (possibly twice) on underflow.
		let (mut aminusb, aminusb_underflow) = $sub(&a_u64s, &b_u64s);
		if aminusb_underflow {
			(aminusb, overflow) = $add(&aminusb, &$PRIME);
			(aminusb, overflow) = $add(&aminusb, &$PRIME);
		let aminusbmodp = $div_rem(&aminusb, &$PRIME).unwrap().1;
		assert_eq!(&$amodp.sub(&$bmodp).$into().0, &aminusbmodp);

	// Dispatch on operand width (in 64-bit limbs).
	if a_u64s.len() == 2 {
		test!(mul_2, sqr_2, add_2, sub_2, div_rem_2, mod_inv_2);
	} else if a_u64s.len() == 4 {
		test!(mul_4, sqr_4, add_4, sub_4, div_rem_4, mod_inv_4);
		let amodp = U256Mod::<fuzz_moduli::P256>::from_u256(U256(a_u64s[..].try_into().unwrap()));
		let bmodp = U256Mod::<fuzz_moduli::P256>::from_u256(U256(b_u64s[..].try_into().unwrap()));
		test_mod!(amodp, bmodp, fuzz_moduli::P256::PRIME.0, 4, into_u256, div_rem_8, div_rem_4, mul_4, add_4, sub_4);
	} else if a_u64s.len() == 6 {
		test!(mul_6, sqr_6, add_6, sub_6, div_rem_6, mod_inv_6);
		let amodp = U384Mod::<fuzz_moduli::P384>::from_u384(U384(a_u64s[..].try_into().unwrap()));
		let bmodp = U384Mod::<fuzz_moduli::P384>::from_u384(U384(b_u64s[..].try_into().unwrap()));
		test_mod!(amodp, bmodp, fuzz_moduli::P384::PRIME.0, 6, into_u384, div_rem_12, div_rem_6, mul_6, add_6, sub_6);
	} else if a_u64s.len() == 8 {
		test!(mul_8, sqr_8, add_8, sub_8, div_rem_8, mod_inv_8);
	} else if input.len() == 512*2 + 4 {
		// 1024 bytes of operands plus a 4-byte little-endian exponent: test expmod_odd_mod.
		let mut e_bytes = [0; 4];
		e_bytes.copy_from_slice(&input[512 * 2..512 * 2 + 4]);
		let e = u32::from_le_bytes(e_bytes);
		let a = U4096::from_be_bytes(&a).unwrap();
		let b = U4096::from_be_bytes(&b).unwrap();

		// expmod_odd_mod may reject the modulus (e.g. if even); nothing to check then.
		let res = if let Ok(r) = a.expmod_odd_mod(e, &b) { r } else { return };
		let mut res_bytes = Vec::with_capacity(512);
		res_bytes.extend_from_slice(&i.to_be_bytes());

		// Compare against ibig's modular exponentiation.
		let ring = ibig::modular::ModuloRing::new(&bi);
		let ar = ring.from(ai.clone());
		assert_eq!(ar.pow(&e.into()).residue(), ibig::UBig::from_be_bytes(&res_bytes));
/// Combines two u64 limbs (most-significant first) into a `u128`.
fn u64s_to_u128(v: [u64; 2]) -> u128 {
	((v[0] as u128) << 64) | (v[1] as u128)
}
/// Combines two u64 limbs (most-significant first) into an `i128`, reinterpreting the top
/// bit of `v[0]` as the sign bit.
fn u64s_to_i128(v: [u64; 2]) -> i128 {
	(((v[0] as u128) << 64) | (v[1] as u128)) as i128
}
	// NOTE(review): the enclosing `#[test] fn` headers and the mutating macro invocations
	// between these statements (presumably `negate!(...)`) are elided in this excerpt. The
	// asserts below exercise two's-complement negation of 2-limb values, most-significant
	// limb first.
	let mut zero = [0u64; 2];
	assert_eq!(zero, [0; 2]);

	let mut one = [0u64, 1u64];
	assert_eq!(u64s_to_i128(one), -1);

	let mut minus_one: [u64; 2] = [u64::MAX, u64::MAX];
	assert_eq!(minus_one, [0, 1]);

	// Checks for the `double!` macro (shift-left-by-one, returning the carry-out).
	let mut zero = [0u64; 2];
	assert!(!double!(zero));
	assert_eq!(zero, [0; 2]);

	let mut one = [0u64, 1u64];
	assert!(!double!(one));
	assert_eq!(one, [0, 2]);

	// The carry propagates from the low limb into the high limb.
	let mut u64_max = [0, u64::MAX];
	assert!(!double!(u64_max));
	assert_eq!(u64_max, [1, u64::MAX - 1]);

	let mut u64_carry_overflow = [0x7fff_ffff_ffff_ffffu64, 0x8000_0000_0000_0000];
	assert!(!double!(u64_carry_overflow));
	assert_eq!(u64_carry_overflow, [u64::MAX, 0]);

	// Doubling all-ones overflows and reports the carry.
	let mut max = [u64::MAX; 4];
	assert!(double!(max));
	assert_eq!(max, [u64::MAX, u64::MAX, u64::MAX, u64::MAX - 1]);
// Spot-checks 2-limb schoolbook multiplication against precomputed products.
fn mul_min_simple_tests() {
	// NOTE(review): the `let a = ...; let b = ...;` lines for this first case are elided in
	// this excerpt.
	let res = mul_2(&a, &b);
	assert_eq!(res, [0, 3, 10, 8]);

	let a = [0x1bad_cafe_dead_beef, 2424];
	let b = [0x2bad_beef_dead_cafe, 4242];
	let res = mul_2(&a, &b);
	assert_eq!(res, [340296855556511776, 15015369169016130186, 4248480538569992542, 10282608]);

	let a = [0xf6d9_f8eb_8b60_7a6d, 0x4b93_833e_2194_fc2e];
	let b = [0xfdab_0000_6952_8ab4, 0xd302_0000_8282_0000];
	let res = mul_2(&a, &b);
	assert_eq!(res, [17625486516939878681, 18390748118453258282, 2695286104209847530, 1510594524414214144]);

	let a = [0x8b8b_8b8b_8b8b_8b8b, 0x8b8b_8b8b_8b8b_8b8b];
	let b = [0x8b8b_8b8b_8b8b_8b8b, 0x8b8b_8b8b_8b8b_8b8b];
	let res = mul_2(&a, &b);
	assert_eq!(res, [5481115605507762349, 8230042173354675923, 16737530186064798, 15714555036048702841]);

	let a = [0x0000_0000_0000_0020, 0x002d_362c_005b_7753];
	let b = [0x0900_0000_0030_0003, 0xb708_00fe_0000_00cd];
	let res = mul_2(&a, &b);
	assert_eq!(res, [1, 2306290405521702946, 17647397529888728169, 10271802099389861239]);

	let a = [0x0000_0000_7fff_ffff, 0xffff_ffff_0000_0000];
	let b = [0x0000_0800_0000_0000, 0x0000_1000_0000_00e1];
	let res = mul_2(&a, &b);
	assert_eq!(res, [1024, 0, 483183816703, 18446743107341910016]);

	let a = [0xf6d9_f8eb_ebeb_eb6d, 0x4b93_83a0_bb35_0680];
	let b = [0xfd02_b9b9_b9b9_b9b9, 0xb9b9_b9b9_b9b9_b9b9];
	let res = mul_2(&a, &b);
	assert_eq!(res, [17579814114991930107, 15033987447865175985, 488855932380801351, 5453318140933190272]);

	// (2^128 - 1)^2 == 2^256 - 2^129 + 1
	let a = [u64::MAX; 2];
	let b = [u64::MAX; 2];
	let res = mul_2(&a, &b);
	assert_eq!(res, [18446744073709551615, 18446744073709551614, 0, 1]);

// Checks add_2/sub_2 against u128's built-in overflowing_add/overflowing_sub.
fn test(a: [u64; 2], b: [u64; 2]) {
	let a_int = u64s_to_u128(a);
	let b_int = u64s_to_u128(b);

	let res = add_2(&a, &b);
	assert_eq!((u64s_to_u128(res.0), res.1), a_int.overflowing_add(b_int));

	let res = sub_2(&a, &b);
	assert_eq!((u64s_to_u128(res.0), res.1), a_int.overflowing_sub(b_int));

// Edge cases: zeros, all-ones, and carry/borrow across the limb boundary.
test([0; 2], [0; 2]);
test([0x1bad_cafe_dead_beef, 2424], [0x2bad_cafe_dead_cafe, 4242]);
test([u64::MAX; 2], [u64::MAX; 2]);
test([u64::MAX, 0x8000_0000_0000_0000], [0, 0x7fff_ffff_ffff_ffff]);
test([0, 0x7fff_ffff_ffff_ffff], [u64::MAX, 0x8000_0000_0000_0000]);
test([u64::MAX, 0], [0, u64::MAX]);
test([0, u64::MAX], [u64::MAX, 0]);
test([u64::MAX; 2], [0; 2]);
test([0; 2], [u64::MAX; 2]);

// Spot-checks 4-limb schoolbook multiplication against precomputed products.
fn mul_4_simple_tests() {
	// NOTE(review): the operand definitions for this first case are elided in this excerpt.
	assert_eq!(mul_4(&a, &b),
		[0, 2, 4, 6, 8, 6, 4, 2]);

	let a = [0x1bad_cafe_dead_beef, 2424, 0x1bad_cafe_dead_beef, 2424];
	let b = [0x2bad_beef_dead_cafe, 4242, 0x2bad_beef_dead_cafe, 4242];
	assert_eq!(mul_4(&a, &b),
		[340296855556511776, 15015369169016130186, 4929074249683016095, 11583994264332991364,
		8837257932696496860, 15015369169036695402, 4248480538569992542, 10282608]);

	// (2^256 - 1)^2
	let a = [u64::MAX; 4];
	let b = [u64::MAX; 4];
	assert_eq!(mul_4(&a, &b),
		[18446744073709551615, 18446744073709551615, 18446744073709551615,
		18446744073709551614, 0, 0, 0, 1]);
1689 fn double_simple_tests() {
1690 let mut a = [0xfff5_b32d_01ff_0000, 0x00e7_e7e7_e7e7_e7e7];
1691 assert!(double!(a));
1692 assert_eq!(a, [18440945635998695424, 130551405668716494]);
1694 let mut a = [u64::MAX, u64::MAX];
1695 assert!(double!(a));
1696 assert_eq!(a, [18446744073709551615, 18446744073709551614]);