// Copyright 2015-2023 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

//! Multi-precision integers.
//!
//! # Modular Arithmetic.
//!
//! Modular arithmetic is done in finite commutative rings ℤ/mℤ for some
//! modulus *m*. We work in finite commutative rings instead of finite fields
//! because the RSA public modulus *n* is not prime, which means ℤ/nℤ contains
//! nonzero elements that have no multiplicative inverse, so ℤ/nℤ is not a
//! finite field.
//!
//! In some calculations we need to deal with multiple rings at once. For
//! example, RSA private key operations operate in the rings ℤ/nℤ, ℤ/pℤ, and
//! ℤ/qℤ. Types and functions dealing with such rings are all parameterized
//! over a type `M` to ensure that we don't wrongly mix up the math, e.g. by
//! multiplying an element of ℤ/pℤ by an element of ℤ/qℤ modulo q. This follows
//! the "unit" pattern described in [Static checking of units in Servo].
//!
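//! As an illustration of how the `M` parameter catches mistakes, here is a
//! minimal sketch using two hypothetical marker types `P` and `Q` (they are
//! not part of this module): `elem_mul` requires both operands and the
//! modulus to share the same `M`, so mixing rings fails to compile.
//!
//! ```ignore
//! enum P {}
//! enum Q {}
//!
//! fn example(x: Elem<P>, y: Elem<Q>, p: &Modulus<P>) {
//!     // elem_mul(&x, y, p); // Does not compile: `y` is an element of
//!     //                     // ℤ/qℤ, not ℤ/pℤ.
//! }
//! ```
//!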
//! `Elem` also uses the static unit checking pattern to statically track the
//! Montgomery factors that need to be canceled out in each value using its
//! `E` parameter.
//!
//! [Static checking of units in Servo]:
//! https://blog.mozilla.org/research/2014/06/23/static-checking-of-units-in-servo/

use self::boxed_limbs::BoxedLimbs;
pub(crate) use self::{
    modulus::{Modulus, OwnedModulus, MODULUS_MAX_LIMBS},
    private_exponent::PrivateExponent,
};
use crate::{
    arithmetic::montgomery::*,
    bits::BitLength,
    c, error,
    limb::{self, Limb, LimbMask, LIMB_BITS},
};
use alloc::vec;
use core::{marker::PhantomData, num::NonZeroU64};

mod boxed_limbs;
mod modulus;
mod private_exponent;

pub trait PublicModulus {}

/// Elements of ℤ/mℤ for some modulus *m*.
//
// Defaulting `E` to `Unencoded` is a convenience for callers from outside this
// submodule. However, for maximum clarity, we always explicitly use
// `Unencoded` within the `bigint` submodule.
pub struct Elem<M, E = Unencoded> {
    limbs: BoxedLimbs<M>,

    /// The number of Montgomery factors that need to be canceled out from
    /// `limbs` to get the actual value.
    encoding: PhantomData<E>,
}

// TODO: `derive(Clone)` after https://github.com/rust-lang/rust/issues/26925
// is resolved or restrict `M: Clone` and `E: Clone`.
impl<M, E> Clone for Elem<M, E> {
    fn clone(&self) -> Self {
        Self {
            limbs: self.limbs.clone(),
            encoding: self.encoding,
        }
    }
}

impl<M, E> Elem<M, E> {
    #[inline]
    pub fn is_zero(&self) -> bool {
        self.limbs.is_zero()
    }
}

/// Does a Montgomery reduction on `limbs` assuming they are Montgomery-encoded ('R')
/// and assuming they are the same size as `m`, but perhaps not reduced mod `m`.
/// The result will be fully reduced mod `m`.
fn from_montgomery_amm<M>(limbs: BoxedLimbs<M>, m: &Modulus<M>) -> Elem<M, Unencoded> {
    debug_assert_eq!(limbs.len(), m.limbs().len());

    let mut limbs = limbs;
    let mut one = [0; MODULUS_MAX_LIMBS];
    one[0] = 1;
    let one = &one[..m.limbs().len()];
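    // Montgomery multiplication computes `a * b * R**-1 (mod m)`, so
    // multiplying the R-encoded input by the plain value 1 cancels the single
    // factor of R, leaving the unencoded value, fully reduced mod `m`.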
    limbs_mont_mul(&mut limbs, one, m.limbs(), m.n0(), m.cpu_features());
    Elem {
        limbs,
        encoding: PhantomData,
    }
}

#[cfg(any(test, not(target_arch = "x86_64")))]
impl<M> Elem<M, R> {
    #[inline]
    pub fn into_unencoded(self, m: &Modulus<M>) -> Elem<M, Unencoded> {
        from_montgomery_amm(self.limbs, m)
    }
}

impl<M> Elem<M, Unencoded> {
    pub fn from_be_bytes_padded(
        input: untrusted::Input,
        m: &Modulus<M>,
    ) -> Result<Self, error::Unspecified> {
        Ok(Self {
            limbs: BoxedLimbs::from_be_bytes_padded_less_than(input, m)?,
            encoding: PhantomData,
        })
    }

    #[inline]
    pub fn fill_be_bytes(&self, out: &mut [u8]) {
        // See Falko Strenzke, "Manger's Attack revisited", ICICS 2010.
        limb::big_endian_from_limbs(&self.limbs, out)
    }

    fn is_one(&self) -> bool {
        limb::limbs_equal_limb_constant_time(&self.limbs, 1) == LimbMask::True
    }
}

pub fn elem_mul<M, AF, BF>(
    a: &Elem<M, AF>,
    mut b: Elem<M, BF>,
    m: &Modulus<M>,
) -> Elem<M, <(AF, BF) as ProductEncoding>::Output>
where
    (AF, BF): ProductEncoding,
{
    limbs_mont_mul(&mut b.limbs, &a.limbs, m.limbs(), m.n0(), m.cpu_features());
    Elem {
        limbs: b.limbs,
        encoding: PhantomData,
    }
}

// r *= 2.
fn elem_double<M, AF>(r: &mut Elem<M, AF>, m: &Modulus<M>) {
    limb::limbs_double_mod(&mut r.limbs, m.limbs())
}

// TODO: This is currently unused, but we intend to eventually use this to
// reduce elements (x mod q) mod p in the RSA CRT. If/when we do so, we
// should update the testing so it is reflective of that usage, instead of
// the old usage.
pub fn elem_reduced_once<A, M>(
    a: &Elem<A, Unencoded>,
    m: &Modulus<M>,
    other_modulus_len_bits: BitLength,
) -> Elem<M, Unencoded> {
    assert_eq!(m.len_bits(), other_modulus_len_bits);

    let mut r = a.limbs.clone();
    limb::limbs_reduce_once_constant_time(&mut r, m.limbs());
    Elem {
        limbs: BoxedLimbs::new_unchecked(r.into_limbs()),
        encoding: PhantomData,
    }
}

#[inline]
pub fn elem_reduced<Larger, Smaller>(
    a: &Elem<Larger, Unencoded>,
    m: &Modulus<Smaller>,
    other_prime_len_bits: BitLength,
) -> Elem<Smaller, RInverse> {
    // This is stricter than required mathematically but this is what we
    // guarantee and this is easier to check. The real requirement is that
    // `a < m*R` where `R` is the Montgomery `R` for `m`.
    assert_eq!(other_prime_len_bits, m.len_bits());

    // `limbs_from_mont_in_place` requires this.
    assert_eq!(a.limbs.len(), m.limbs().len() * 2);

    let mut tmp = [0; MODULUS_MAX_LIMBS];
    let tmp = &mut tmp[..a.limbs.len()];
    tmp.copy_from_slice(&a.limbs);

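    // `limbs_from_mont_in_place` performs one Montgomery reduction of the
    // double-width value, which divides it by R; the leftover factor of
    // R**-1 is what the `RInverse` encoding of the result records.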
    let mut r = m.zero();
    limbs_from_mont_in_place(&mut r.limbs, tmp, m.limbs(), m.n0());
    r
}

fn elem_squared<M, E>(
    mut a: Elem<M, E>,
    m: &Modulus<M>,
) -> Elem<M, <(E, E) as ProductEncoding>::Output>
where
    (E, E): ProductEncoding,
{
    limbs_mont_square(&mut a.limbs, m.limbs(), m.n0(), m.cpu_features());
    Elem {
        limbs: a.limbs,
        encoding: PhantomData,
    }
}

pub fn elem_widen<Larger, Smaller>(
    a: Elem<Smaller, Unencoded>,
    m: &Modulus<Larger>,
    smaller_modulus_bits: BitLength,
) -> Result<Elem<Larger, Unencoded>, error::Unspecified> {
    if smaller_modulus_bits >= m.len_bits() {
        return Err(error::Unspecified);
    }
    let mut r = m.zero();
    r.limbs[..a.limbs.len()].copy_from_slice(&a.limbs);
    Ok(r)
}

// TODO: Document why this works for all Montgomery factors.
pub fn elem_add<M, E>(mut a: Elem<M, E>, b: Elem<M, E>, m: &Modulus<M>) -> Elem<M, E> {
    limb::limbs_add_assign_mod(&mut a.limbs, &b.limbs, m.limbs());
    a
}

// TODO: Document why this works for all Montgomery factors.
pub fn elem_sub<M, E>(mut a: Elem<M, E>, b: &Elem<M, E>, m: &Modulus<M>) -> Elem<M, E> {
    prefixed_extern! {
        // `r` and `a` may alias.
        fn LIMBS_sub_mod(
            r: *mut Limb,
            a: *const Limb,
            b: *const Limb,
            m: *const Limb,
            num_limbs: c::size_t,
        );
    }
    unsafe {
        LIMBS_sub_mod(
            a.limbs.as_mut_ptr(),
            a.limbs.as_ptr(),
            b.limbs.as_ptr(),
            m.limbs().as_ptr(),
            m.limbs().len(),
        );
    }
    a
}

// The value 1, Montgomery-encoded some number of times.
pub struct One<M, E>(Elem<M, E>);

impl<M> One<M, RR> {
    // Returns RR = R**2 (mod m) where R = 2**r is the smallest power of
    // 2**LIMB_BITS such that R > m.
    //
    // Even though the assembly on some 32-bit platforms works with 64-bit
    // values, using `LIMB_BITS` here, rather than `N0::LIMBS_USED * LIMB_BITS`,
    // is correct because R**2 will still be a multiple of the latter as
    // `N0::LIMBS_USED` is either one or two.
    pub(crate) fn newRR(m: &Modulus<M>) -> Self {
        // The number of limbs in the numbers involved.
        let w = m.limbs().len();

        // The length of the numbers involved, in bits. R = 2**r.
        let r = w * LIMB_BITS;

        let mut acc: Elem<M, R> = m.zero();
        m.oneR(&mut acc.limbs);

        // 2**t * R can be calculated by t doublings starting with R.
        //
        // Choose a t that divides r and where t doublings are cheaper than 1 squaring.
        //
        // We could choose other values of t than w. But if t < d then the exponentiation that
        // follows would require multiplications. Normally d is 1 (i.e. the modulus length is a
        // power of two: RSA 1024, 2048, 4096, 8192) or 3 (RSA 1536, 3072).
        //
        // XXX(perf): Currently t = w / 2 is slightly faster. TODO(perf): Optimize `elem_double`
        // and re-run benchmarks to rebalance this.
        let t = w;
        let z = w.trailing_zeros();
        let d = w >> z;
        debug_assert_eq!(w, d * (1 << z));
        debug_assert!(d <= t);
        debug_assert!(t < r);
        for _ in 0..t {
            elem_double(&mut acc, m);
        }

        // Because t | r:
        //
        // MontExp(2**t * R, r / t)
        // = (2**t)**(r / t) * R (mod m) by definition of MontExp.
        // = (2**t)**(1/t * r) * R (mod m)
        // = (2**(t * 1/t))**r * R (mod m)
        // = (2**1)**r * R (mod m)
        // = 2**r * R (mod m)
        // = R * R (mod m)
        // = RR
        //
        // Like BoringSSL, use t = w (`m.limbs.len()`) which ensures that the exponent is a power
        // of two. Consequently, there will be no multiplications in the Montgomery exponentiation;
        // there will only be lg(r / t) squarings.
        //
        // lg(r / t)
        // = lg((w * 2**b) / t)
        // = lg((t * 2**b) / t)
        // = lg(2**b)
        // = b
        // TODO(MSRV:1.67): const B: u32 = LIMB_BITS.ilog2();
        const B: u32 = if cfg!(target_pointer_width = "64") {
            6
        } else if cfg!(target_pointer_width = "32") {
            5
        } else {
            panic!("unsupported target_pointer_width")
        };
        #[allow(clippy::assertions_on_constants)]
        const _LIMB_BITS_IS_2_POW_B: () = assert!(LIMB_BITS == 1 << B);
        debug_assert_eq!(r, t * (1 << B));
        for _ in 0..B {
            acc = elem_squared(acc, m);
        }

        Self(Elem {
            limbs: acc.limbs,
            encoding: PhantomData, // PhantomData<RR>
        })
    }
}

impl<M> One<M, RRR> {
    pub(crate) fn newRRR(One(oneRR): One<M, RR>, m: &Modulus<M>) -> Self {
        Self(elem_squared(oneRR, m))
    }
}

impl<M, E> AsRef<Elem<M, E>> for One<M, E> {
    fn as_ref(&self) -> &Elem<M, E> {
        &self.0
    }
}

impl<M: PublicModulus, E> Clone for One<M, E> {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

/// Calculates base**exponent (mod m).
///
/// The run time is a function of the number of limbs in `m` and the bit
/// length and Hamming weight of `exponent`. The bounds on `m` are pretty
/// obvious but the bounds on `exponent` are less obvious. Callers should
/// document the bounds they place on the maximum value and maximum Hamming
/// weight of `exponent`.
// TODO: The test coverage needs to be expanded, e.g. test with the largest
// accepted exponent and with the most common values of 65537 and 3.
pub(crate) fn elem_exp_vartime<M>(
    base: Elem<M, R>,
    exponent: NonZeroU64,
    m: &Modulus<M>,
) -> Elem<M, R> {
    // Use what [Knuth] calls the "S-and-X binary method", i.e. variable-time
    // square-and-multiply that scans the exponent from the most significant
    // bit to the least significant bit (left-to-right). Left-to-right requires
    // less storage compared to right-to-left scanning, at the cost of needing
    // to compute `exponent.leading_zeros()`, which we assume to be cheap.
    //
    // As explained in [Knuth], exponentiation by squaring is the most
    // efficient algorithm when the Hamming weight is 2 or less. It isn't the
    // most efficient for all other, uncommon, exponent values but any
    // suboptimality is bounded at least by the small bit length of `exponent`
    // as enforced by its type.
    //
    // This implementation is slightly simplified by taking advantage of the
    // fact that we require the exponent to be a positive integer.
    //
    // [Knuth]: The Art of Computer Programming, Volume 2: Seminumerical
    // Algorithms (3rd Edition), Section 4.6.3.
    let exponent = exponent.get();
    let mut acc = base.clone();
    let mut bit = 1 << (64 - 1 - exponent.leading_zeros());
    debug_assert!((exponent & bit) != 0);
    while bit > 1 {
        bit >>= 1;
        acc = elem_squared(acc, m);
        if (exponent & bit) != 0 {
            acc = elem_mul(&base, acc, m);
        }
    }
    acc
}

#[cfg(not(target_arch = "x86_64"))]
pub fn elem_exp_consttime<M>(
    base: Elem<M, R>,
    exponent: &PrivateExponent,
    m: &Modulus<M>,
) -> Result<Elem<M, Unencoded>, error::Unspecified> {
    use crate::{bssl, limb::Window};

    const WINDOW_BITS: usize = 5;
    const TABLE_ENTRIES: usize = 1 << WINDOW_BITS;

    let num_limbs = m.limbs().len();

    let mut table = vec![0; TABLE_ENTRIES * num_limbs];

    fn gather<M>(table: &[Limb], acc: &mut Elem<M, R>, i: Window) {
        prefixed_extern! {
            fn LIMBS_select_512_32(
                r: *mut Limb,
                table: *const Limb,
                num_limbs: c::size_t,
                i: Window,
            ) -> bssl::Result;
        }
        Result::from(unsafe {
            LIMBS_select_512_32(acc.limbs.as_mut_ptr(), table.as_ptr(), acc.limbs.len(), i)
        })
        .unwrap();
    }

    fn power<M>(
        table: &[Limb],
        mut acc: Elem<M, R>,
        m: &Modulus<M>,
        i: Window,
        mut tmp: Elem<M, R>,
    ) -> (Elem<M, R>, Elem<M, R>) {
        for _ in 0..WINDOW_BITS {
            acc = elem_squared(acc, m);
        }
        gather(table, &mut tmp, i);
        let acc = elem_mul(&tmp, acc, m);
        (acc, tmp)
    }

    fn entry(table: &[Limb], i: usize, num_limbs: usize) -> &[Limb] {
        &table[(i * num_limbs)..][..num_limbs]
    }
    fn entry_mut(table: &mut [Limb], i: usize, num_limbs: usize) -> &mut [Limb] {
        &mut table[(i * num_limbs)..][..num_limbs]
    }

    // table[0] = base**0 (i.e. 1).
    m.oneR(entry_mut(&mut table, 0, num_limbs));

    entry_mut(&mut table, 1, num_limbs).copy_from_slice(&base.limbs);
    for i in 2..TABLE_ENTRIES {
        let (src1, src2) = if i % 2 == 0 {
            (i / 2, i / 2)
        } else {
            (i - 1, 1)
        };
        let (previous, rest) = table.split_at_mut(num_limbs * i);
        let src1 = entry(previous, src1, num_limbs);
        let src2 = entry(previous, src2, num_limbs);
        let dst = entry_mut(rest, 0, num_limbs);
        limbs_mont_product(dst, src1, src2, m.limbs(), m.n0(), m.cpu_features());
    }

    let tmp = m.zero();
    let mut acc = Elem {
        limbs: base.limbs,
        encoding: PhantomData,
    };
    let (acc, _) = limb::fold_5_bit_windows(
        exponent.limbs(),
        |initial_window| {
            gather(&table, &mut acc, initial_window);
            (acc, tmp)
        },
        |(acc, tmp), window| power(&table, acc, m, window, tmp),
    );

    Ok(acc.into_unencoded(m))
}

#[cfg(target_arch = "x86_64")]
pub fn elem_exp_consttime<M>(
    base: Elem<M, R>,
    exponent: &PrivateExponent,
    m: &Modulus<M>,
) -> Result<Elem<M, Unencoded>, error::Unspecified> {
    use crate::{cpu, limb::LIMB_BYTES};

    // Pretty much all the math here requires CPU feature detection to have
    // been done. `cpu_features` isn't threaded through all the internal
    // functions, so just make it clear that it has been done at this point.
    let cpu_features = m.cpu_features();

    // The x86_64 assembly was written under the assumption that the input data
    // is aligned to `MOD_EXP_CTIME_ALIGN` bytes, which was/is 64 in OpenSSL.
    // Similarly, OpenSSL uses the x86_64 assembly functions by giving it only
    // inputs `tmp`, `am`, and `np` that immediately follow the table. All the
    // awkwardness here stems from trying to use the assembly code like OpenSSL
    // does.

    use crate::limb::Window;

    const WINDOW_BITS: usize = 5;
    const TABLE_ENTRIES: usize = 1 << WINDOW_BITS;

    let num_limbs = m.limbs().len();

    const ALIGNMENT: usize = 64;
    assert_eq!(ALIGNMENT % LIMB_BYTES, 0);
    let mut table = vec![0; ((TABLE_ENTRIES + 3) * num_limbs) + ALIGNMENT];
    let (table, state) = {
        let misalignment = (table.as_ptr() as usize) % ALIGNMENT;
        let table = &mut table[((ALIGNMENT - misalignment) / LIMB_BYTES)..];
        assert_eq!((table.as_ptr() as usize) % ALIGNMENT, 0);
        table.split_at_mut(TABLE_ENTRIES * num_limbs)
    };

    fn scatter(table: &mut [Limb], acc: &[Limb], i: Window, num_limbs: usize) {
        prefixed_extern! {
            fn bn_scatter5(a: *const Limb, a_len: c::size_t, table: *mut Limb, i: Window);
        }
        unsafe { bn_scatter5(acc.as_ptr(), num_limbs, table.as_mut_ptr(), i) }
    }

    fn gather(table: &[Limb], acc: &mut [Limb], i: Window, num_limbs: usize) {
        prefixed_extern! {
            fn bn_gather5(r: *mut Limb, a_len: c::size_t, table: *const Limb, i: Window);
        }
        unsafe { bn_gather5(acc.as_mut_ptr(), num_limbs, table.as_ptr(), i) }
    }

    fn limbs_mul_mont_gather5_amm(
        table: &[Limb],
        acc: &mut [Limb],
        base: &[Limb],
        m: &[Limb],
        n0: &N0,
        i: Window,
        num_limbs: usize,
    ) {
        prefixed_extern! {
            fn bn_mul_mont_gather5(
                rp: *mut Limb,
                ap: *const Limb,
                table: *const Limb,
                np: *const Limb,
                n0: &N0,
                num: c::size_t,
                power: Window,
            );
        }
        unsafe {
            bn_mul_mont_gather5(
                acc.as_mut_ptr(),
                base.as_ptr(),
                table.as_ptr(),
                m.as_ptr(),
                n0,
                num_limbs,
                i,
            );
        }
    }

    fn power_amm(
        table: &[Limb],
        acc: &mut [Limb],
        m_cached: &[Limb],
        n0: &N0,
        i: Window,
        num_limbs: usize,
    ) {
        prefixed_extern! {
            fn bn_power5(
                r: *mut Limb,
                a: *const Limb,
                table: *const Limb,
                n: *const Limb,
                n0: &N0,
                num: c::size_t,
                i: Window,
            );
        }
        unsafe {
            bn_power5(
                acc.as_mut_ptr(),
                acc.as_ptr(),
                table.as_ptr(),
                m_cached.as_ptr(),
                n0,
                num_limbs,
                i,
            );
        }
    }

    // These are named `(tmp, am, np)` in BoringSSL.
    let (acc, base_cached, m_cached): (&mut [Limb], &[Limb], &[Limb]) = {
        let (acc, rest) = state.split_at_mut(num_limbs);
        let (base_cached, rest) = rest.split_at_mut(num_limbs);

        // Upstream, the input `base` is not Montgomery-encoded, so they compute a
        // Montgomery-encoded copy and store it here.
        base_cached.copy_from_slice(&base.limbs);

        let m_cached = &mut rest[..num_limbs];
        // "To improve cache locality" according to upstream.
        m_cached.copy_from_slice(m.limbs());

        (acc, base_cached, m_cached)
    };

    let n0 = m.n0();

    // Fill in all the powers of 2 of `acc` into the table using only squaring and without any
    // gathering, storing the last calculated power into `acc`.
    fn scatter_powers_of_2(
        table: &mut [Limb],
        acc: &mut [Limb],
        m_cached: &[Limb],
        n0: &N0,
        mut i: Window,
        num_limbs: usize,
        cpu_features: cpu::Features,
    ) {
        loop {
            scatter(table, acc, i, num_limbs);
            i *= 2;
            if i >= (TABLE_ENTRIES as Window) {
                break;
            }
            limbs_mont_square(acc, m_cached, n0, cpu_features);
        }
    }

    // All entries in `table` will be Montgomery encoded.

    // acc = table[0] = base**0 (i.e. 1).
    m.oneR(acc);
    scatter(table, acc, 0, num_limbs);

    // acc = base**1 (i.e. base).
    acc.copy_from_slice(base_cached);

    // Fill in entries 1, 2, 4, 8, 16.
    scatter_powers_of_2(table, acc, m_cached, n0, 1, num_limbs, cpu_features);
    // Fill in entries 3, 6, 12, 24; 5, 10, 20, 30; 7, 14, 28; 9, 18; 11, 22; 13, 26; 15, 30;
    // 17; 19; 21; 23; 25; 27; 29; 31.
    for i in (3..(TABLE_ENTRIES as Window)).step_by(2) {
        limbs_mul_mont_gather5_amm(table, acc, base_cached, m_cached, n0, i - 1, num_limbs);
        scatter_powers_of_2(table, acc, m_cached, n0, i, num_limbs, cpu_features);
    }

    let acc = limb::fold_5_bit_windows(
        exponent.limbs(),
        |initial_window| {
            gather(table, acc, initial_window, num_limbs);
            acc
        },
        |acc, window| {
            power_amm(table, acc, m_cached, n0, window, num_limbs);
            acc
        },
    );

    let mut r_amm = base.limbs;
    r_amm.copy_from_slice(acc);

    Ok(from_montgomery_amm(r_amm, m))
}

/// Verifies that a == b**-1 (mod m), i.e. a**-1 == b (mod m).
pub fn verify_inverses_consttime<M>(
    a: &Elem<M, R>,
    b: Elem<M, Unencoded>,
    m: &Modulus<M>,
) -> Result<(), error::Unspecified> {
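    // Montgomery multiplication of the R-encoded `a` by the unencoded `b`
    // cancels the factor of R, yielding the unencoded product a*b, which
    // equals 1 exactly when `a` and `b` are inverses of each other mod m.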
    if elem_mul(a, b, m).is_one() {
        Ok(())
    } else {
        Err(error::Unspecified)
    }
}

#[inline]
pub fn elem_verify_equal_consttime<M, E>(
    a: &Elem<M, E>,
    b: &Elem<M, E>,
) -> Result<(), error::Unspecified> {
    if limb::limbs_equal_limbs_consttime(&a.limbs, &b.limbs) == LimbMask::True {
        Ok(())
    } else {
        Err(error::Unspecified)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{cpu, test};

    // Type-level representation of an arbitrary modulus.
    struct M {}

    impl PublicModulus for M {}

    #[test]
    fn test_elem_exp_consttime() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("../../crypto/fipsmodule/bn/test/mod_exp_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                let m = consume_modulus::<M>(test_case, "M");
                let m = m.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "ModExp", &m);
                let base = consume_elem(test_case, "A", &m);
                let e = {
                    let bytes = test_case.consume_bytes("E");
                    PrivateExponent::from_be_bytes_for_test_only(untrusted::Input::from(&bytes), &m)
                        .expect("valid exponent")
                };
                let base = into_encoded(base, &m);
                let actual_result = elem_exp_consttime(base, &e, &m).unwrap();
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    // TODO: fn test_elem_exp_vartime() using
    // "src/rsa/bigint_elem_exp_vartime_tests.txt". See that file for details.
    // In the meantime, the function is tested indirectly via the RSA
    // verification and signing tests.
    #[test]
    fn test_elem_mul() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("../../crypto/fipsmodule/bn/test/mod_mul_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                let m = consume_modulus::<M>(test_case, "M");
                let m = m.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "ModMul", &m);
                let a = consume_elem(test_case, "A", &m);
                let b = consume_elem(test_case, "B", &m);

                let b = into_encoded(b, &m);
                let a = into_encoded(a, &m);
                let actual_result = elem_mul(&a, b, &m);
                let actual_result = actual_result.into_unencoded(&m);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    #[test]
    fn test_elem_squared() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("bigint_elem_squared_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                let m = consume_modulus::<M>(test_case, "M");
                let m = m.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "ModSquare", &m);
                let a = consume_elem(test_case, "A", &m);

                let a = into_encoded(a, &m);
                let actual_result = elem_squared(a, &m);
                let actual_result = actual_result.into_unencoded(&m);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    #[test]
    fn test_elem_reduced() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("bigint_elem_reduced_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                struct M {}

                let m_ = consume_modulus::<M>(test_case, "M");
                let m = m_.modulus(cpu_features);
                let expected_result = consume_elem(test_case, "R", &m);
                let a =
                    consume_elem_unchecked::<M>(test_case, "A", expected_result.limbs.len() * 2);
                let other_modulus_len_bits = m_.len_bits();

                let actual_result = elem_reduced(&a, &m, other_modulus_len_bits);
                let oneRR = One::newRR(&m);
                let actual_result = elem_mul(oneRR.as_ref(), actual_result, &m);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    #[test]
    fn test_elem_reduced_once() {
        let cpu_features = cpu::features();
        test::run(
            test_file!("bigint_elem_reduced_once_tests.txt"),
            |section, test_case| {
                assert_eq!(section, "");

                struct M {}
                struct O {}
                let m = consume_modulus::<M>(test_case, "m");
                let m = m.modulus(cpu_features);
                let a = consume_elem_unchecked::<O>(test_case, "a", m.limbs().len());
                let expected_result = consume_elem::<M>(test_case, "r", &m);
                let other_modulus_len_bits = m.len_bits();

                let actual_result = elem_reduced_once(&a, &m, other_modulus_len_bits);
                assert_elem_eq(&actual_result, &expected_result);

                Ok(())
            },
        )
    }

    fn consume_elem<M>(
        test_case: &mut test::TestCase,
        name: &str,
        m: &Modulus<M>,
    ) -> Elem<M, Unencoded> {
        let value = test_case.consume_bytes(name);
        Elem::from_be_bytes_padded(untrusted::Input::from(&value), m).unwrap()
    }

    fn consume_elem_unchecked<M>(
        test_case: &mut test::TestCase,
        name: &str,
        num_limbs: usize,
    ) -> Elem<M, Unencoded> {
        let bytes = test_case.consume_bytes(name);
        let mut limbs = BoxedLimbs::zero(num_limbs);
        limb::parse_big_endian_and_pad_consttime(untrusted::Input::from(&bytes), &mut limbs)
            .unwrap();
        Elem {
            limbs,
            encoding: PhantomData,
        }
    }

    fn consume_modulus<M>(test_case: &mut test::TestCase, name: &str) -> OwnedModulus<M> {
        let value = test_case.consume_bytes(name);
        OwnedModulus::from_be_bytes(untrusted::Input::from(&value)).unwrap()
    }

    fn assert_elem_eq<M, E>(a: &Elem<M, E>, b: &Elem<M, E>) {
        if elem_verify_equal_consttime(a, b).is_err() {
            panic!("{:x?} != {:x?}", &*a.limbs, &*b.limbs);
        }
    }

    fn into_encoded<M>(a: Elem<M, Unencoded>, m: &Modulus<M>) -> Elem<M, R> {
        let oneRR = One::newRR(m);
        elem_mul(oneRR.as_ref(), a, m)
    }
}