// Copyright 2015-2023 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

//! Multi-precision integers.
//!
//! # Modular Arithmetic.
//!
//! Modular arithmetic is done in finite commutative rings ℤ/mℤ for some
//! modulus *m*. We work in finite commutative rings instead of finite fields
//! because the RSA public modulus *n* is not prime, which means ℤ/nℤ contains
//! nonzero elements that have no multiplicative inverse, so ℤ/nℤ is not a
//! finite field.
//!
//! In some calculations we need to deal with multiple rings at once. For
//! example, RSA private key operations operate in the rings ℤ/nℤ, ℤ/pℤ, and
//! ℤ/qℤ. Types and functions dealing with such rings are all parameterized
//! over a type `M` to ensure that we don't wrongly mix up the math, e.g. by
//! multiplying an element of ℤ/pℤ by an element of ℤ/qℤ modulo q. This follows
//! the "unit" pattern described in [Static checking of units in Servo].
//!
//! `Elem` also uses the static unit checking pattern to statically track the
//! Montgomery factors that need to be canceled out in each value using its
//! `E` parameter.
//!
//! [Static checking of units in Servo]:
//! https://blog.mozilla.org/research/2014/06/23/static-checking-of-units-in-servo/
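//!
//! As a minimal sketch of the unit pattern (hypothetical marker types and
//! signatures, not this module's API):
//!
//! ```ignore
//! struct P {} // marker type for ℤ/pℤ
//! struct Q {} // marker type for ℤ/qℤ
//!
//! struct Elem<M> {
//!     limbs: [usize; 4],
//!     ring: core::marker::PhantomData<M>,
//! }
//!
//! // Both operands must belong to the same ring `M`.
//! fn elem_mul<M>(a: &Elem<M>, b: &Elem<M>) -> Elem<M> {
//!     unimplemented!()
//! }
//!
//! // Given `p: Elem<P>` and `q: Elem<Q>`, `elem_mul(&p, &q)` fails to
//! // compile because `P` and `Q` are different types.
//! ```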

use self::boxed_limbs::BoxedLimbs;
pub(crate) use self::{
    modulus::{Modulus, OwnedModulus},
    modulusvalue::OwnedModulusValue,
    private_exponent::PrivateExponent,
};
use super::{inout::AliasingSlices3, limbs512, montgomery::*, LimbSliceError, MAX_LIMBS};
use crate::{
    bits::BitLength,
    c,
    error::{self, LenMismatchError},
    limb::{self, Limb, LIMB_BITS},
    polyfill::slice::{self, AsChunks},
};
use core::{
    marker::PhantomData,
    num::{NonZeroU64, NonZeroUsize},
};

mod boxed_limbs;
mod modulus;
mod modulusvalue;
mod private_exponent;

pub trait PublicModulus {}

// When we need to create a new `Elem`, first we create a `Storage` and then
// move its `limbs` into the new element. When we want to recycle an `Elem`'s
// memory allocation, we convert it back into a `Storage`.
pub struct Storage<M> {
    limbs: BoxedLimbs<M>,
}

impl<M, E> From<Elem<M, E>> for Storage<M> {
    fn from(elem: Elem<M, E>) -> Self {
        Self { limbs: elem.limbs }
    }
}

/// Elements of ℤ/mℤ for some modulus *m*.
//
// Defaulting `E` to `Unencoded` is a convenience for callers from outside this
// submodule. However, for maximum clarity, we always explicitly use
// `Unencoded` within the `bigint` submodule.
pub struct Elem<M, E = Unencoded> {
    limbs: BoxedLimbs<M>,

    /// The number of Montgomery factors that need to be canceled out from
    /// `value` to get the actual value.
    encoding: PhantomData<E>,
}

impl<M, E> Elem<M, E> {
    pub fn clone_into(&self, mut out: Storage<M>) -> Self {
        out.limbs.copy_from_slice(&self.limbs);
        Self {
            limbs: out.limbs,
            encoding: self.encoding,
        }
    }
}

impl<M, E> Elem<M, E> {
    #[inline]
    pub fn is_zero(&self) -> bool {
        limb::limbs_are_zero_constant_time(&self.limbs).leak()
    }
}

/// Does a Montgomery reduction on `limbs` assuming they are Montgomery-encoded ('R') and assuming
/// they are the same size as `m`, but perhaps not reduced mod `m`. The result will be
/// fully reduced mod `m`.
///
/// WARNING: Takes a `Storage` as an in/out value.
fn from_montgomery_amm<M>(mut in_out: Storage<M>, m: &Modulus<M>) -> Elem<M, Unencoded> {
    let mut one = [0; MAX_LIMBS];
    one[0] = 1;
    let one = &one[..m.limbs().len()];
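    // Montgomery multiplication of `in_out` by the plain value 1 computes
    // (in_out * 1) / R = in_out/R (mod m), canceling the pending Montgomery
    // ('R') factor and fully reducing the result mod `m`.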
    limbs_mul_mont(
        (&mut in_out.limbs[..], one),
        m.limbs(),
        m.n0(),
        m.cpu_features(),
    )
    .unwrap_or_else(unwrap_impossible_limb_slice_error);
    Elem {
        limbs: in_out.limbs,
        encoding: PhantomData,
    }
}

#[cfg(any(test, not(target_arch = "x86_64")))]
impl<M> Elem<M, R> {
    #[inline]
    pub fn into_unencoded(self, m: &Modulus<M>) -> Elem<M, Unencoded> {
        from_montgomery_amm(Storage::from(self), m)
    }
}

impl<M> Elem<M, Unencoded> {
    pub fn from_be_bytes_padded(
        input: untrusted::Input,
        m: &Modulus<M>,
    ) -> Result<Self, error::Unspecified> {
        Ok(Self {
            limbs: BoxedLimbs::from_be_bytes_padded_less_than(input, m)?,
            encoding: PhantomData,
        })
    }

    #[inline]
    pub fn fill_be_bytes(&self, out: &mut [u8]) {
        // See Falko Strenzke, "Manger's Attack revisited", ICICS 2010.
        limb::big_endian_from_limbs(&self.limbs, out)
    }
}

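// Montgomery multiplication computes a*b/R (mod m), consuming one Montgomery
// factor from the product. The `(AF, BF): ProductEncoding` bound tracks the
// remaining factors in the result type, so e.g. multiplying an `R`-encoded
// value by an `Unencoded` one yields an `Unencoded` result.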
pub fn elem_mul_into<M, AF, BF>(
    mut out: Storage<M>,
    a: &Elem<M, AF>,
    b: &Elem<M, BF>,
    m: &Modulus<M>,
) -> Elem<M, <(AF, BF) as ProductEncoding>::Output>
where
    (AF, BF): ProductEncoding,
{
    limbs_mul_mont(
        (out.limbs.as_mut(), b.limbs.as_ref(), a.limbs.as_ref()),
        m.limbs(),
        m.n0(),
        m.cpu_features(),
    )
    .unwrap_or_else(unwrap_impossible_limb_slice_error);
    Elem {
        limbs: out.limbs,
        encoding: PhantomData,
    }
}

pub fn elem_mul<M, AF, BF>(
    a: &Elem<M, AF>,
    mut b: Elem<M, BF>,
    m: &Modulus<M>,
) -> Elem<M, <(AF, BF) as ProductEncoding>::Output>
where
    (AF, BF): ProductEncoding,
{
    limbs_mul_mont(
        (&mut b.limbs[..], &a.limbs[..]),
        m.limbs(),
        m.n0(),
        m.cpu_features(),
    )
    .unwrap_or_else(unwrap_impossible_limb_slice_error);
    Elem {
        limbs: b.limbs,
        encoding: PhantomData,
    }
}

// r *= 2.
fn elem_double<M, AF>(r: &mut Elem<M, AF>, m: &Modulus<M>) {
    limb::limbs_double_mod(&mut r.limbs, m.limbs())
        .unwrap_or_else(unwrap_impossible_len_mismatch_error)
}

// TODO: This is currently unused, but we intend to eventually use this to
// reduce elements (x mod q) mod p in the RSA CRT. If/when we do so, we
// should update the testing so it is reflective of that usage, instead of
// the old usage.
pub fn elem_reduced_once<A, M>(
    mut r: Storage<M>,
    a: &Elem<A, Unencoded>,
    m: &Modulus<M>,
    other_modulus_len_bits: BitLength,
) -> Elem<M, Unencoded> {
    assert_eq!(m.len_bits(), other_modulus_len_bits);
    r.limbs.copy_from_slice(&a.limbs);
    limb::limbs_reduce_once_constant_time(&mut r.limbs, m.limbs())
        .unwrap_or_else(unwrap_impossible_len_mismatch_error);
    Elem {
        limbs: r.limbs,
        encoding: PhantomData,
    }
}

#[inline]
pub fn elem_reduced<Larger, Smaller>(
    mut r: Storage<Smaller>,
    a: &Elem<Larger, Unencoded>,
    m: &Modulus<Smaller>,
    other_prime_len_bits: BitLength,
) -> Elem<Smaller, RInverse> {
    // This is stricter than required mathematically but this is what we
    // guarantee and this is easier to check. The real requirement is that
    // `a < m*R` where `R` is the Montgomery `R` for `m`.
    assert_eq!(other_prime_len_bits, m.len_bits());

    // `limbs_from_mont_in_place` requires this.
    assert_eq!(a.limbs.len(), m.limbs().len() * 2);

    let mut tmp = [0; MAX_LIMBS];
    let tmp = &mut tmp[..a.limbs.len()];
    tmp.copy_from_slice(&a.limbs);

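    // `limbs_from_mont_in_place` Montgomery-reduces the double-width value,
    // i.e. it divides it by R (mod m). That pending division is what the
    // `RInverse` encoding in the return type records, so it can be canceled
    // out later.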
    limbs_from_mont_in_place(&mut r.limbs, tmp, m.limbs(), m.n0());
    Elem {
        limbs: r.limbs,
        encoding: PhantomData,
    }
}

#[inline]
fn elem_squared<M, E>(
    mut a: Elem<M, E>,
    m: &Modulus<M>,
) -> Elem<M, <(E, E) as ProductEncoding>::Output>
where
    (E, E): ProductEncoding,
{
    limbs_square_mont(&mut a.limbs, m.limbs(), m.n0(), m.cpu_features())
        .unwrap_or_else(unwrap_impossible_limb_slice_error);
    Elem {
        limbs: a.limbs,
        encoding: PhantomData,
    }
}
266
267pub fn elem_widen<Larger, Smaller>(
268 mut r: Storage<Larger>,
269 a: Elem<Smaller, Unencoded>,
270 m: &Modulus<Larger>,
271 smaller_modulus_bits: BitLength,
272) -> Result<Elem<Larger, Unencoded>, error::Unspecified> {
273 if smaller_modulus_bits >= m.len_bits() {
274 return Err(error::Unspecified);
275 }
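    // Since `a < 2**smaller_modulus_bits` and `smaller_modulus_bits <
    // m.len_bits()`, we have `a < m`, so zero-extending the limbs of `a`
    // gives its canonical representative in the larger ring.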
    let (to_copy, to_zero) = r.limbs.split_at_mut(a.limbs.len());
    to_copy.copy_from_slice(&a.limbs);
    to_zero.fill(0);
    Ok(Elem {
        limbs: r.limbs,
        encoding: PhantomData,
    })
}

// TODO: Document why this works for all Montgomery factors.
pub fn elem_add<M, E>(mut a: Elem<M, E>, b: Elem<M, E>, m: &Modulus<M>) -> Elem<M, E> {
    limb::limbs_add_assign_mod(&mut a.limbs, &b.limbs, m.limbs())
        .unwrap_or_else(unwrap_impossible_len_mismatch_error);
    a
}

// TODO: Document why this works for all Montgomery factors.
pub fn elem_sub<M, E>(mut a: Elem<M, E>, b: &Elem<M, E>, m: &Modulus<M>) -> Elem<M, E> {
    prefixed_extern! {
        // `r` and `a` may alias.
        fn LIMBS_sub_mod(
            r: *mut Limb,
            a: *const Limb,
            b: *const Limb,
            m: *const Limb,
            num_limbs: c::NonZero_size_t,
        );
    }
    let num_limbs = NonZeroUsize::new(m.limbs().len()).unwrap();
    (a.limbs.as_mut(), b.limbs.as_ref())
        .with_non_dangling_non_null_pointers_rab(num_limbs, |r, a, b| {
            let m = m.limbs().as_ptr(); // Also non-dangling because num_limbs is non-zero.
            unsafe { LIMBS_sub_mod(r, a, b, m, num_limbs) }
        })
        .unwrap_or_else(unwrap_impossible_len_mismatch_error);
    a
}

// The value 1, Montgomery-encoded some number of times.
pub struct One<M, E>(Elem<M, E>);

impl<M> One<M, RR> {
    // Returns RR = R**2 (mod m) where R = 2**r is the smallest power of
    // 2**LIMB_BITS such that R > m.
    //
    // Even though the assembly on some 32-bit platforms works with 64-bit
    // values, using `LIMB_BITS` here, rather than `N0::LIMBS_USED * LIMB_BITS`,
    // is correct because R**2 will still be a multiple of the latter as
    // `N0::LIMBS_USED` is either one or two.
    pub(crate) fn newRR(mut out: Storage<M>, m: &Modulus<M>) -> Self {
        // The number of limbs in the numbers involved.
        let w = m.limbs().len();

        // The length of the numbers involved, in bits. R = 2**r.
        let r = w * LIMB_BITS;

        m.oneR(&mut out.limbs);
        let mut acc: Elem<M, R> = Elem {
            limbs: out.limbs,
            encoding: PhantomData,
        };

        // 2**t * R can be calculated by t doublings starting with R.
        //
        // Choose a t that divides r and where t doublings are cheaper than 1 squaring.
        //
        // We could choose other values of t than w. But if t < d then the exponentiation that
        // follows would require multiplications. Normally d is 1 (i.e. the modulus length is a
        // power of two: RSA 1024, 2048, 4096, 8192) or 3 (RSA 1536, 3072).
        //
        // XXX(perf): Currently t = w / 2 is slightly faster. TODO(perf): Optimize `elem_double`
        // and re-run benchmarks to rebalance this.
        let t = w;
        let z = w.trailing_zeros();
        let d = w >> z;
        debug_assert_eq!(w, d * (1 << z));
        debug_assert!(d <= t);
        debug_assert!(t < r);
        for _ in 0..t {
            elem_double(&mut acc, m);
        }

        // Because t | r:
        //
        // MontExp(2**t * R, r / t)
        // = (2**t)**(r / t) * R (mod m) by definition of MontExp.
        // = (2**t)**(1/t * r) * R (mod m)
        // = (2**(t * 1/t))**r * R (mod m)
        // = (2**1)**r * R (mod m)
        // = 2**r * R (mod m)
        // = R * R (mod m)
        // = RR
        //
        // Like BoringSSL, use t = w (`m.limbs.len()`) which ensures that the exponent is a power
        // of two. Consequently, there will be no multiplications in the Montgomery exponentiation;
        // there will only be lg(r / t) squarings.
        //
        // lg(r / t)
        // = lg((w * 2**b) / t)
        // = lg((t * 2**b) / t)
        // = lg(2**b)
        // = b
        // TODO(MSRV:1.67): const B: u32 = LIMB_BITS.ilog2();
        const B: u32 = if cfg!(target_pointer_width = "64") {
            6
        } else if cfg!(target_pointer_width = "32") {
            5
        } else {
            panic!("unsupported target_pointer_width")
        };
        #[allow(clippy::assertions_on_constants)]
        const _LIMB_BITS_IS_2_POW_B: () = assert!(LIMB_BITS == 1 << B);
        debug_assert_eq!(r, t * (1 << B));
        for _ in 0..B {
            acc = elem_squared(acc, m);
        }
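        // acc == 2**r * R == R * R == RR (mod m).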

        Self(Elem {
            limbs: acc.limbs,
            encoding: PhantomData, // PhantomData<RR>
        })
    }
}

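// RRR == RR * RR / R == R**3 (mod m): squaring RR in the Montgomery domain
// multiplies the two R**2 values and cancels one factor of R.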
impl<M> One<M, RRR> {
    pub(crate) fn newRRR(One(oneRR): One<M, RR>, m: &Modulus<M>) -> Self {
        Self(elem_squared(oneRR, m))
    }
}

impl<M, E> AsRef<Elem<M, E>> for One<M, E> {
    fn as_ref(&self) -> &Elem<M, E> {
        &self.0
    }
}

impl<M: PublicModulus, E> One<M, E> {
    pub fn clone_into(&self, out: Storage<M>) -> Self {
        Self(self.0.clone_into(out))
    }
}

/// Calculates base**exponent (mod m).
///
/// The run time is a function of the number of limbs in `m` and the bit
/// length and Hamming weight of `exponent`. The bounds on `m` are pretty
/// obvious but the bounds on `exponent` are less obvious. Callers should
/// document the bounds they place on the maximum value and maximum Hamming
/// weight of `exponent`.
// TODO: The test coverage needs to be expanded, e.g. test with the largest
// accepted exponent and with the most common values of 65537 and 3.
pub(crate) fn elem_exp_vartime<M>(
    out: Storage<M>,
    base: Elem<M, R>,
    exponent: NonZeroU64,
    m: &Modulus<M>,
) -> Elem<M, R> {
    // Use what [Knuth] calls the "S-and-X binary method", i.e. variable-time
    // square-and-multiply that scans the exponent from the most significant
    // bit to the least significant bit (left-to-right). Left-to-right requires
    // less storage compared to right-to-left scanning, at the cost of needing
    // to compute `exponent.leading_zeros()`, which we assume to be cheap.
    //
    // As explained in [Knuth], exponentiation by squaring is the most
    // efficient algorithm when the Hamming weight is 2 or less. It isn't the
    // most efficient for all other, uncommon, exponent values but any
    // suboptimality is bounded at least by the small bit length of `exponent`
    // as enforced by its type.
    //
    // This implementation is slightly simplified by taking advantage of the
    // fact that we require the exponent to be a positive integer.
    //
    // [Knuth]: The Art of Computer Programming, Volume 2: Seminumerical
    // Algorithms (3rd Edition), Section 4.6.3.
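    //
    // For example, with exponent 19 = 0b10011 the loop below computes
    // (((base**2)**2)**2 * base)**2 * base = base**19: one squaring per bit
    // after the leading 1, plus one multiplication per set bit.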
    let exponent = exponent.get();
    let mut acc = base.clone_into(out);
    let mut bit = 1 << (64 - 1 - exponent.leading_zeros());
    debug_assert!((exponent & bit) != 0);
    while bit > 1 {
        bit >>= 1;
        acc = elem_squared(acc, m);
        if (exponent & bit) != 0 {
            acc = elem_mul(&base, acc, m);
        }
    }
    acc
}

pub fn elem_exp_consttime<N, P>(
    out: Storage<P>,
    base: &Elem<N>,
    oneRRR: &One<P, RRR>,
    exponent: &PrivateExponent,
    p: &Modulus<P>,
    other_prime_len_bits: BitLength,
) -> Result<Elem<P, Unencoded>, LimbSliceError> {
    // `elem_exp_consttime_inner` is parameterized on `STORAGE_LIMBS` only so
    // we can run tests with larger-than-supported-in-operation test vectors.
    elem_exp_consttime_inner::<N, P, { ELEM_EXP_CONSTTIME_MAX_MODULUS_LIMBS * STORAGE_ENTRIES }>(
        out,
        base,
        oneRRR,
        exponent,
        p,
        other_prime_len_bits,
    )
}

// The maximum modulus size supported for `elem_exp_consttime` in normal
// operation.
const ELEM_EXP_CONSTTIME_MAX_MODULUS_LIMBS: usize = 2048 / LIMB_BITS;
const _LIMBS_PER_CHUNK_DIVIDES_ELEM_EXP_CONSTTIME_MAX_MODULUS_LIMBS: () =
    assert!(ELEM_EXP_CONSTTIME_MAX_MODULUS_LIMBS % limbs512::LIMBS_PER_CHUNK == 0);
const WINDOW_BITS: u32 = 5;
const TABLE_ENTRIES: usize = 1 << WINDOW_BITS;
const STORAGE_ENTRIES: usize = TABLE_ENTRIES + if cfg!(target_arch = "x86_64") { 3 } else { 0 };
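// On x86_64 the three extra storage entries are the scratch values that the
// assembly expects to immediately follow the table (named `tmp`, `am`, and
// `np` in BoringSSL); see the x86_64 `elem_exp_consttime_inner` below.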

#[cfg(not(target_arch = "x86_64"))]
fn elem_exp_consttime_inner<N, M, const STORAGE_LIMBS: usize>(
    out: Storage<M>,
    base_mod_n: &Elem<N>,
    oneRRR: &One<M, RRR>,
    exponent: &PrivateExponent,
    m: &Modulus<M>,
    other_prime_len_bits: BitLength,
) -> Result<Elem<M, Unencoded>, LimbSliceError> {
    use crate::{bssl, limb::Window};

    let base_rinverse: Elem<M, RInverse> = elem_reduced(out, base_mod_n, m, other_prime_len_bits);

    let num_limbs = m.limbs().len();
    let m_chunked: AsChunks<Limb, { limbs512::LIMBS_PER_CHUNK }> = match slice::as_chunks(m.limbs())
    {
        (m, []) => m,
        _ => {
            return Err(LimbSliceError::len_mismatch(LenMismatchError::new(
                num_limbs,
            )))
        }
    };
    let cpe = m_chunked.len(); // 512-bit chunks per entry.

    // This code doesn't have the strict alignment requirements that the x86_64
    // version does, but uses the same aligned storage for convenience.
    assert!(STORAGE_LIMBS % (STORAGE_ENTRIES * limbs512::LIMBS_PER_CHUNK) == 0); // TODO: `const`
    let mut table = limbs512::AlignedStorage::<STORAGE_LIMBS>::zeroed();
    let mut table = table
        .aligned_chunks_mut(TABLE_ENTRIES, cpe)
        .map_err(LimbSliceError::len_mismatch)?;

    // TODO: Rewrite the below in terms of `AsChunks`.
    let table = table.as_flattened_mut();

    fn gather<M>(table: &[Limb], acc: &mut Elem<M, R>, i: Window) {
        prefixed_extern! {
            fn LIMBS_select_512_32(
                r: *mut Limb,
                table: *const Limb,
                num_limbs: c::size_t,
                i: Window,
            ) -> bssl::Result;
        }
        Result::from(unsafe {
            LIMBS_select_512_32(acc.limbs.as_mut_ptr(), table.as_ptr(), acc.limbs.len(), i)
        })
        .unwrap();
    }

    fn power<M>(
        table: &[Limb],
        mut acc: Elem<M, R>,
        m: &Modulus<M>,
        i: Window,
        mut tmp: Elem<M, R>,
    ) -> (Elem<M, R>, Elem<M, R>) {
        for _ in 0..WINDOW_BITS {
            acc = elem_squared(acc, m);
        }
        gather(table, &mut tmp, i);
        let acc = elem_mul(&tmp, acc, m);
        (acc, tmp)
    }

    fn entry(table: &[Limb], i: usize, num_limbs: usize) -> &[Limb] {
        &table[(i * num_limbs)..][..num_limbs]
    }
    fn entry_mut(table: &mut [Limb], i: usize, num_limbs: usize) -> &mut [Limb] {
        &mut table[(i * num_limbs)..][..num_limbs]
    }

    // table[0] = base**0 (i.e. 1).
    m.oneR(entry_mut(table, 0, num_limbs));

    // table[1] = base*R == (base/R * RRR)/R
    limbs_mul_mont(
        (
            entry_mut(table, 1, num_limbs),
            base_rinverse.limbs.as_ref(),
            oneRRR.as_ref().limbs.as_ref(),
        ),
        m.limbs(),
        m.n0(),
        m.cpu_features(),
    )?;
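    // table[i] = table[i/2]**2 for even i, and table[i] = table[i-1] * table[1]
    // for odd i, so each remaining entry costs one Montgomery multiplication.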
    for i in 2..TABLE_ENTRIES {
        let (src1, src2) = if i % 2 == 0 {
            (i / 2, i / 2)
        } else {
            (i - 1, 1)
        };
        let (previous, rest) = table.split_at_mut(num_limbs * i);
        let src1 = entry(previous, src1, num_limbs);
        let src2 = entry(previous, src2, num_limbs);
        let dst = entry_mut(rest, 0, num_limbs);
        limbs_mul_mont((dst, src1, src2), m.limbs(), m.n0(), m.cpu_features())?;
    }

    let mut acc = Elem {
        limbs: base_rinverse.limbs,
        encoding: PhantomData,
    };
    let tmp = m.alloc_zero();
    let tmp = Elem {
        limbs: tmp.limbs,
        encoding: PhantomData,
    };
    let (acc, _) = limb::fold_5_bit_windows(
        exponent.limbs(),
        |initial_window| {
            gather(&table, &mut acc, initial_window);
            (acc, tmp)
        },
        |(acc, tmp), window| power(&table, acc, m, window, tmp),
    );

    Ok(acc.into_unencoded(m))
}

#[cfg(target_arch = "x86_64")]
fn elem_exp_consttime_inner<N, M, const STORAGE_LIMBS: usize>(
    out: Storage<M>,
    base_mod_n: &Elem<N>,
    oneRRR: &One<M, RRR>,
    exponent: &PrivateExponent,
    m: &Modulus<M>,
    other_prime_len_bits: BitLength,
) -> Result<Elem<M, Unencoded>, LimbSliceError> {
    use super::x86_64_mont::{
        gather5, mul_mont5, mul_mont_gather5_amm, power5_amm, scatter5, sqr_mont5,
    };
    use crate::{
        cpu::{
            intel::{Adx, Bmi2},
            GetFeature as _,
        },
        limb::{LeakyWindow, Window},
        polyfill::slice::AsChunksMut,
    };

    let n0 = m.n0();

    let cpu2 = m.cpu_features().get_feature();
    let cpu3 = m.cpu_features().get_feature();

    if base_mod_n.limbs.len() != m.limbs().len() * 2 {
        return Err(LimbSliceError::len_mismatch(LenMismatchError::new(
            base_mod_n.limbs.len(),
        )));
    }

    let m_original: AsChunks<Limb, 8> = match slice::as_chunks(m.limbs()) {
        (m, []) => m,
        _ => return Err(LimbSliceError::len_mismatch(LenMismatchError::new(8))),
    };
    let cpe = m_original.len(); // 512-bit chunks per entry

    let oneRRR = &oneRRR.as_ref().limbs;
    let oneRRR = match slice::as_chunks(oneRRR) {
        (c, []) => c,
        _ => {
            return Err(LimbSliceError::len_mismatch(LenMismatchError::new(
                oneRRR.len(),
            )))
        }
    };

    // The x86_64 assembly was written under the assumption that the input data
    // is aligned to `MOD_EXP_CTIME_ALIGN` bytes, which was/is 64 in OpenSSL.
    // Subsequently, it was changed such that, according to BoringSSL, they
    // only require 16 byte alignment. We enforce the old, stronger, alignment
    // unless/until we can see a benefit to reducing it.
    //
    // Similarly, OpenSSL uses the x86_64 assembly functions by giving it only
    // inputs `tmp`, `am`, and `np` that immediately follow the table.
    // According to BoringSSL, in older versions of the OpenSSL code, this
    // extra space was required for memory safety because the assembly code
    // would over-read the table; according to BoringSSL, this is no longer the
    // case. Regardless, the upstream code also contained comments implying
    // that this was also important for performance. For now, we do as OpenSSL
    // did/does.
    const MOD_EXP_CTIME_ALIGN: usize = 64;
    // Required by the x86_64 assembly: the windowing code and scratch-space
    // layout assume exactly 32 table entries plus 3 scratch entries.
    const _TABLE_ENTRIES_IS_32: () = assert!(TABLE_ENTRIES == 32);
    const _STORAGE_ENTRIES_HAS_3_EXTRA: () = assert!(STORAGE_ENTRIES == TABLE_ENTRIES + 3);

    assert!(STORAGE_LIMBS % (STORAGE_ENTRIES * limbs512::LIMBS_PER_CHUNK) == 0); // TODO: `const`
    let mut table = limbs512::AlignedStorage::<STORAGE_LIMBS>::zeroed();
    let mut table = table
        .aligned_chunks_mut(STORAGE_ENTRIES, cpe)
        .map_err(LimbSliceError::len_mismatch)?;
    let (mut table, mut state) = table.split_at_mut(TABLE_ENTRIES * cpe);
    assert_eq!((table.as_ptr() as usize) % MOD_EXP_CTIME_ALIGN, 0);

    // These are named `(tmp, am, np)` in BoringSSL.
    let (mut acc, mut rest) = state.split_at_mut(cpe);
    let (mut base_cached, mut m_cached) = rest.split_at_mut(cpe);

    // "To improve cache locality" according to upstream.
    m_cached
        .as_flattened_mut()
        .copy_from_slice(m_original.as_flattened());
    let m_cached = m_cached.as_ref();

    let out: Elem<M, RInverse> = elem_reduced(out, base_mod_n, m, other_prime_len_bits);
    let base_rinverse = match slice::as_chunks(&out.limbs) {
        (c, []) => c,
        _ => {
            return Err(LimbSliceError::len_mismatch(LenMismatchError::new(
                out.limbs.len(),
            )))
        }
    };

    // base_cached = base*R == (base/R * RRR)/R
    mul_mont5(
        base_cached.as_mut(),
        base_rinverse,
        oneRRR,
        m_cached,
        n0,
        cpu2,
    )?;
    let base_cached = base_cached.as_ref();
    let mut out = Storage::from(out); // recycle.

    // Fill in all the powers of 2 of `acc` into the table using only squaring and without any
    // gathering, storing the last calculated power into `acc`.
    fn scatter_powers_of_2(
        mut table: AsChunksMut<Limb, 8>,
        mut acc: AsChunksMut<Limb, 8>,
        m_cached: AsChunks<Limb, 8>,
        n0: &N0,
        mut i: LeakyWindow,
        cpu: Option<(Adx, Bmi2)>,
    ) -> Result<(), LimbSliceError> {
        loop {
            scatter5(acc.as_ref(), table.as_mut(), i)?;
            i *= 2;
            if i >= TABLE_ENTRIES as LeakyWindow {
                break;
            }
            sqr_mont5(acc.as_mut(), m_cached, n0, cpu)?;
        }
        Ok(())
    }
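
    // E.g. starting with i == 3 and acc == base**3 * R, `scatter_powers_of_2`
    // stores entries 3, 6, 12, and 24, leaving acc == base**24 * R.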

    // All entries in `table` will be Montgomery encoded.

    // acc = table[0] = base**0 (i.e. 1).
    m.oneR(acc.as_flattened_mut());
    scatter5(acc.as_ref(), table.as_mut(), 0)?;

    // acc = base**1 (i.e. base).
    acc.as_flattened_mut()
        .copy_from_slice(base_cached.as_flattened());

    // Fill in entries 1, 2, 4, 8, 16.
    scatter_powers_of_2(table.as_mut(), acc.as_mut(), m_cached, n0, 1, cpu2)?;
    // Fill in entries 3, 6, 12, 24; 5, 10, 20, 30; 7, 14, 28; 9, 18; 11, 22; 13, 26; 15, 30;
    // 17; 19; 21; 23; 25; 27; 29; 31.
    for i in (3..(TABLE_ENTRIES as LeakyWindow)).step_by(2) {
        let power = Window::from(i - 1);
        assert!(power < 32); // Not secret.
        unsafe {
            mul_mont_gather5_amm(
                acc.as_mut(),
                base_cached,
                table.as_ref(),
                m_cached,
                n0,
                power,
                cpu3,
            )
        }?;
        scatter_powers_of_2(table.as_mut(), acc.as_mut(), m_cached, n0, i, cpu2)?;
    }

    let table = table.as_ref();

    let acc = limb::fold_5_bit_windows(
        exponent.limbs(),
        |initial_window| {
            unsafe { gather5(acc.as_mut(), table, initial_window) }
                .unwrap_or_else(unwrap_impossible_limb_slice_error);
            acc
        },
        |mut acc, window| {
            unsafe { power5_amm(acc.as_mut(), table, m_cached, n0, window, cpu3) }
                .unwrap_or_else(unwrap_impossible_limb_slice_error);
            acc
        },
    );

    // Reuse `base_rinverse`'s limbs to save an allocation.
    out.limbs.copy_from_slice(acc.as_flattened());
    Ok(from_montgomery_amm(out, m))
}

/// Verifies a == b**-1 (mod m), i.e. a**-1 == b (mod m).
pub fn verify_inverses_consttime<M>(
    a: &Elem<M, R>,
    b: Elem<M, Unencoded>,
    m: &Modulus<M>,
) -> Result<(), error::Unspecified> {
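    // `a` is Montgomery-encoded and `b` is not, so the Montgomery
    // multiplication computes (a*R) * b / R == a*b (mod m); the product is 1
    // exactly when `a` and `b` are inverses of each other mod m.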
    let r = elem_mul(a, b, m);
    limb::verify_limbs_equal_1_leak_bit(&r.limbs)
}

#[inline]
pub fn elem_verify_equal_consttime<M, E>(
    a: &Elem<M, E>,
    b: &Elem<M, E>,
) -> Result<(), error::Unspecified> {
    let equal = limb::limbs_equal_limbs_consttime(&a.limbs, &b.limbs)
        .unwrap_or_else(unwrap_impossible_len_mismatch_error);
    if !equal.leak() {
        return Err(error::Unspecified);
    }
    Ok(())
}

#[cold]
#[inline(never)]
fn unwrap_impossible_len_mismatch_error<T>(LenMismatchError { .. }: LenMismatchError) -> T {
    unreachable!()
}

#[cold]
#[inline(never)]
fn unwrap_impossible_limb_slice_error(err: LimbSliceError) {
    match err {
        LimbSliceError::LenMismatch(_) => unreachable!(),
        LimbSliceError::TooShort(_) => unreachable!(),
        LimbSliceError::TooLong(_) => unreachable!(),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{cpu, test};

    // Type-level representation of an arbitrary modulus.
    struct M {}

    impl PublicModulus for M {}

    #[test]
    fn test_elem_exp_consttime() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("../../crypto/fipsmodule/bn/test/mod_exp_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                let m = consume_modulus::<M>(test_case, "M");
                let m = m.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "ModExp", &m);
                let base = consume_elem(test_case, "A", &m);
                let e = {
                    let bytes = test_case.consume_bytes("E");
                    PrivateExponent::from_be_bytes_for_test_only(untrusted::Input::from(&bytes), &m)
                        .expect("valid exponent")
                };

                let oneRR = One::newRR(m.alloc_zero(), &m);
                let oneRRR = One::newRRR(oneRR, &m);

                // `base` in the test vectors is reduced (mod M) already but
                // the API expects the base to be (mod N) where N = M * P for
                // some other prime P of the same length. Fake that here by
                // pretending there's another prime of equal length.
                struct N {}
                let other_modulus_len_bits = m.len_bits();
                let base: Elem<N> = {
                    let mut limbs = BoxedLimbs::zero(base.limbs.len() * 2);
                    limbs[..base.limbs.len()].copy_from_slice(&base.limbs);
                    Elem {
                        limbs,
                        encoding: PhantomData,
                    }
                };

                let too_big = m.limbs().len() > ELEM_EXP_CONSTTIME_MAX_MODULUS_LIMBS;
                let actual_result = if !too_big {
                    elem_exp_consttime(
                        m.alloc_zero(),
                        &base,
                        &oneRRR,
                        &e,
                        &m,
                        other_modulus_len_bits,
                    )
                } else {
                    let actual_result = elem_exp_consttime(
                        m.alloc_zero(),
                        &base,
                        &oneRRR,
                        &e,
                        &m,
                        other_modulus_len_bits,
                    );
                    // TODO: Be more specific with which error we expect?
                    assert!(actual_result.is_err());
                    // Try again with a larger-than-normally-supported limit.
                    elem_exp_consttime_inner::<_, _, { (4096 / LIMB_BITS) * STORAGE_ENTRIES }>(
                        m.alloc_zero(),
                        &base,
                        &oneRRR,
                        &e,
                        &m,
                        other_modulus_len_bits,
                    )
                };
                match actual_result {
                    Ok(r) => assert_elem_eq(&r, &expected_result),
                    Err(LimbSliceError::LenMismatch { .. }) => panic!(),
                    Err(LimbSliceError::TooLong { .. }) => panic!(),
                    Err(LimbSliceError::TooShort { .. }) => panic!(),
                };

                Ok(())
            },
        )
    }

    // TODO: fn test_elem_exp_vartime() using
    // "src/rsa/bigint_elem_exp_vartime_tests.txt". See that file for details.
    // In the meantime, the function is tested indirectly via the RSA
    // verification and signing tests.
    #[test]
    fn test_elem_mul() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("../../crypto/fipsmodule/bn/test/mod_mul_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                let m = consume_modulus::<M>(test_case, "M");
                let m = m.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "ModMul", &m);
                let a = consume_elem(test_case, "A", &m);
                let b = consume_elem(test_case, "B", &m);

                let b = into_encoded(m.alloc_zero(), b, &m);
                let a = into_encoded(m.alloc_zero(), a, &m);
                let actual_result = elem_mul(&a, b, &m);
                let actual_result = actual_result.into_unencoded(&m);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    #[test]
    fn test_elem_squared() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("bigint_elem_squared_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                let m = consume_modulus::<M>(test_case, "M");
                let m = m.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "ModSquare", &m);
                let a = consume_elem(test_case, "A", &m);

                let a = into_encoded(m.alloc_zero(), a, &m);
                let actual_result = elem_squared(a, &m);
                let actual_result = actual_result.into_unencoded(&m);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    #[test]
    fn test_elem_reduced() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("bigint_elem_reduced_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                struct M {}

                let m_ = consume_modulus::<M>(test_case, "M");
                let m = m_.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "R", &m);
                let a =
                    consume_elem_unchecked::<M>(test_case, "A", expected_result.limbs.len() * 2);
                let other_modulus_len_bits = m_.len_bits();

                let actual_result = elem_reduced(m.alloc_zero(), &a, &m, other_modulus_len_bits);
                let oneRR = One::newRR(m.alloc_zero(), &m);
                let actual_result = elem_mul(oneRR.as_ref(), actual_result, &m);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    #[test]
    fn test_elem_reduced_once() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("bigint_elem_reduced_once_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                struct M {}
                struct O {}
                let m = consume_modulus::<M>(test_case, "m");
                let m = m.modulus(cpu_features);
                let a = consume_elem_unchecked::<O>(test_case, "a", m.limbs().len());
                let expected_result = consume_elem::<M>(test_case, "r", &m);
                let other_modulus_len_bits = m.len_bits();

                let actual_result =
                    elem_reduced_once(m.alloc_zero(), &a, &m, other_modulus_len_bits);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    fn consume_elem<M>(
        test_case: &mut test::TestCase,
        name: &str,
        m: &Modulus<M>,
    ) -> Elem<M, Unencoded> {
        let value = test_case.consume_bytes(name);
        Elem::from_be_bytes_padded(untrusted::Input::from(&value), m).unwrap()
    }

    fn consume_elem_unchecked<M>(
        test_case: &mut test::TestCase,
        name: &str,
        num_limbs: usize,
    ) -> Elem<M, Unencoded> {
        let bytes = test_case.consume_bytes(name);
        let mut limbs = BoxedLimbs::zero(num_limbs);
        limb::parse_big_endian_and_pad_consttime(untrusted::Input::from(&bytes), &mut limbs)
            .unwrap();
        Elem {
            limbs,
            encoding: PhantomData,
        }
    }

    fn consume_modulus<M>(test_case: &mut test::TestCase, name: &str) -> OwnedModulus<M> {
        let value = test_case.consume_bytes(name);
        OwnedModulus::from(
            OwnedModulusValue::from_be_bytes(untrusted::Input::from(&value)).unwrap(),
        )
    }

    fn assert_elem_eq<M, E>(a: &Elem<M, E>, b: &Elem<M, E>) {
        if elem_verify_equal_consttime(a, b).is_err() {
            panic!("{:x?} != {:x?}", &*a.limbs, &*b.limbs);
        }
    }

    fn into_encoded<M>(out: Storage<M>, a: Elem<M, Unencoded>, m: &Modulus<M>) -> Elem<M, R> {
        let oneRR = One::newRR(out, m);
        elem_mul(oneRR.as_ref(), a, m)
    }
}