// Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

use super::{
    aes::{self, Counter, Overlapping, OverlappingPartialBlock, BLOCK_LEN, ZERO_BLOCK},
    gcm,
    overlapping::IndexError,
    Aad, Nonce, Tag,
};
use crate::{
    cpu,
    error::{self, InputTooLongError},
    polyfill::{slice, sliceutil::overwrite_at_start, usize_from_u64_saturated},
};
use core::ops::RangeFrom;

#[cfg(target_arch = "x86_64")]
use aes::EncryptCtr32 as _;

#[cfg(any(
    all(target_arch = "aarch64", target_endian = "little"),
    all(target_arch = "arm", target_endian = "little"),
    target_arch = "x86",
    target_arch = "x86_64"
))]
use cpu::GetFeature as _;

#[derive(Clone)]
pub(super) struct Key(DynKey);

impl Key {
    pub(super) fn new(
        key: aes::KeyBytes,
        cpu_features: cpu::Features,
    ) -> Result<Self, error::Unspecified> {
        Ok(Self(DynKey::new(key, cpu_features)?))
    }
}

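// Each variant pairs an AES implementation with a GHASH implementation of
// matching capability; the pairing is chosen once, at key construction, so the
// per-record code only dispatches on this enum.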
#[derive(Clone)]
enum DynKey {
    #[cfg(target_arch = "x86_64")]
    AesHwClMulAvxMovbe(Combo<aes::hw::Key, gcm::clmulavxmovbe::Key>),

    #[cfg(any(
        all(target_arch = "aarch64", target_endian = "little"),
        target_arch = "x86",
        target_arch = "x86_64"
    ))]
    AesHwClMul(Combo<aes::hw::Key, gcm::clmul::Key>),

    #[cfg(any(
        all(target_arch = "aarch64", target_endian = "little"),
        all(target_arch = "arm", target_endian = "little")
    ))]
    Simd(Combo<aes::vp::Key, gcm::neon::Key>),

    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    Simd(Combo<aes::vp::Key, gcm::fallback::Key>),

    Fallback(Combo<aes::fallback::Key, gcm::fallback::Key>),
}

impl DynKey {
    fn new(key: aes::KeyBytes, cpu: cpu::Features) -> Result<Self, error::Unspecified> {
        let cpu = cpu.values();
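        // Dispatch in decreasing order of expected performance: hardware AES
        // with carry-less multiplication first, then vector-permute SIMD, then
        // the portable fallback implementations.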
        #[cfg(target_arch = "x86_64")]
        if let Some((aes, gcm)) = cpu.get_feature() {
            let aes_key = aes::hw::Key::new(key, aes, cpu.get_feature())?;
            let gcm_key_value = derive_gcm_key_value(&aes_key);
            let combo = if let Some(cpu) = cpu.get_feature() {
                let gcm_key = gcm::clmulavxmovbe::Key::new(gcm_key_value, cpu);
                Self::AesHwClMulAvxMovbe(Combo { aes_key, gcm_key })
            } else {
                let gcm_key = gcm::clmul::Key::new(gcm_key_value, gcm);
                Self::AesHwClMul(Combo { aes_key, gcm_key })
            };
            return Ok(combo);
        }

        // x86_64 is handled above.
        #[cfg(any(
            all(target_arch = "aarch64", target_endian = "little"),
            target_arch = "x86"
        ))]
        if let (Some(aes), Some(gcm)) = (cpu.get_feature(), cpu.get_feature()) {
            let aes_key = aes::hw::Key::new(key, aes, cpu.get_feature())?;
            let gcm_key_value = derive_gcm_key_value(&aes_key);
            let gcm_key = gcm::clmul::Key::new(gcm_key_value, gcm);
            return Ok(Self::AesHwClMul(Combo { aes_key, gcm_key }));
        }

        #[cfg(any(
            all(target_arch = "aarch64", target_endian = "little"),
            all(target_arch = "arm", target_endian = "little")
        ))]
        if let Some(cpu) = cpu.get_feature() {
            return Self::new_neon(key, cpu);
        }

        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        if let Some(cpu) = cpu.get_feature() {
            return Self::new_ssse3(key, cpu);
        }

        let _ = cpu;
        Self::new_fallback(key)
    }

    #[cfg(any(
        all(target_arch = "aarch64", target_endian = "little"),
        all(target_arch = "arm", target_endian = "little")
    ))]
    #[cfg_attr(target_arch = "aarch64", inline(never))]
    fn new_neon(key: aes::KeyBytes, cpu: cpu::arm::Neon) -> Result<Self, error::Unspecified> {
        let aes_key = aes::vp::Key::new(key, cpu)?;
        let gcm_key_value = derive_gcm_key_value(&aes_key);
        let gcm_key = gcm::neon::Key::new(gcm_key_value, cpu);
        Ok(Self::Simd(Combo { aes_key, gcm_key }))
    }

    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[inline(never)]
    fn new_ssse3(
        key: aes::KeyBytes,
        cpu: aes::vp::RequiredCpuFeatures,
    ) -> Result<Self, error::Unspecified> {
        let aes_key = aes::vp::Key::new(key, cpu)?;
        let gcm_key_value = derive_gcm_key_value(&aes_key);
        let gcm_key = gcm::fallback::Key::new(gcm_key_value);
        Ok(Self::Simd(Combo { aes_key, gcm_key }))
    }

    #[cfg_attr(
        any(
            all(target_arch = "aarch64", target_endian = "little"),
            all(target_arch = "arm", target_endian = "little"),
            target_arch = "x86",
            target_arch = "x86_64",
        ),
        inline(never)
    )]
    fn new_fallback(key: aes::KeyBytes) -> Result<Self, error::Unspecified> {
        let aes_key = aes::fallback::Key::new(key)?;
        let gcm_key_value = derive_gcm_key_value(&aes_key);
        let gcm_key = gcm::fallback::Key::new(gcm_key_value);
        Ok(Self::Fallback(Combo { aes_key, gcm_key }))
    }
}

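// The GCM hash key H is the encryption of the all-zero block under the AES key
// (NIST SP 800-38D).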
fn derive_gcm_key_value(aes_key: &impl aes::EncryptBlock) -> gcm::KeyValue {
    gcm::KeyValue::new(aes_key.encrypt_block(ZERO_BLOCK))
}

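// Process whole blocks in chunks of this many blocks (3KiB of data) per
// encrypt/GHASH pass, presumably to keep the working set cache-resident.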
const CHUNK_BLOCKS: usize = 3 * 1024 / 16;

#[inline(never)]
pub(super) fn seal(
    Key(key): &Key,
    nonce: Nonce,
    aad: Aad<&[u8]>,
    in_out: &mut [u8],
) -> Result<Tag, error::Unspecified> {
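    // Counter block 1 is reserved for computing the authentication tag, so
    // bulk encryption starts at counter block 2.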
    let mut ctr = Counter::one(nonce);
    let tag_iv = ctr.increment();

    match key {
        #[cfg(target_arch = "x86_64")]
        DynKey::AesHwClMulAvxMovbe(Combo { aes_key, gcm_key }) => {
            use crate::c;
            let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?;
            let (htable, xi) = auth.inner();
            prefixed_extern! {
                // `HTable` and `Xi` should be 128-bit aligned. TODO: Can we shrink `HTable`? The
                // assembly says it needs just nine values in that array.
                fn aesni_gcm_encrypt(
                    input: *const u8,
                    output: *mut u8,
                    len: c::size_t,
                    key: &aes::AES_KEY,
                    ivec: &mut Counter,
                    Htable: &gcm::HTable,
                    Xi: &mut gcm::Xi) -> c::size_t;
            }
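            // The assembly encrypts and authenticates some prefix of the
            // input (possibly none of it) and returns the number of bytes it
            // processed; the leftover is handled in Rust below.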
            let processed = unsafe {
                aesni_gcm_encrypt(
                    in_out.as_ptr(),
                    in_out.as_mut_ptr(),
                    in_out.len(),
                    aes_key.inner_less_safe(),
                    &mut ctr,
                    htable,
                    xi,
                )
            };

            let remaining = match in_out.get_mut(processed..) {
                Some(remaining) => remaining,
                None => {
                    // This can't happen. If it did, then the assembly already
                    // caused a buffer overflow.
                    unreachable!()
                }
            };
            let (mut whole, remainder) = slice::as_chunks_mut(remaining);
            aes_key.ctr32_encrypt_within(whole.as_flattened_mut().into(), &mut ctr);
            auth.update_blocks(whole.as_ref());
            let remainder = OverlappingPartialBlock::new(remainder.into())
                .unwrap_or_else(|InputTooLongError { .. }| unreachable!());
            seal_finish(aes_key, auth, remainder, ctr, tag_iv)
        }

        #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
        DynKey::AesHwClMul(Combo { aes_key, gcm_key }) => {
            use crate::bits::BitLength;

            let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?;

            let (mut whole, remainder) = slice::as_chunks_mut(in_out);
            let whole_block_bits = auth.in_out_whole_block_bits();
            let whole_block_bits_u64: BitLength<u64> = whole_block_bits.into();
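            // The conversion to `BitLength<NonZeroU64>` fails when there are
            // no whole blocks, in which case the kernel call is skipped
            // entirely.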
            if let Ok(whole_block_bits) = whole_block_bits_u64.try_into() {
                use core::num::NonZeroU64;

                let (htable, xi) = auth.inner();
                prefixed_extern! {
                    fn aes_gcm_enc_kernel(
                        input: *const [u8; BLOCK_LEN],
                        in_bits: BitLength<NonZeroU64>,
                        output: *mut [u8; BLOCK_LEN],
                        Xi: &mut gcm::Xi,
                        ivec: &mut Counter,
                        key: &aes::AES_KEY,
                        Htable: &gcm::HTable);
                }
                unsafe {
                    aes_gcm_enc_kernel(
                        whole.as_ptr(),
                        whole_block_bits,
                        whole.as_mut_ptr(),
                        xi,
                        &mut ctr,
                        aes_key.inner_less_safe(),
                        htable,
                    )
                }
            }
            let remainder = OverlappingPartialBlock::new(remainder.into())
                .unwrap_or_else(|InputTooLongError { .. }| unreachable!());
            seal_finish(aes_key, auth, remainder, ctr, tag_iv)
        }

        #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
        DynKey::AesHwClMul(c) => seal_strided(c, aad, in_out, ctr, tag_iv),

        #[cfg(any(
            all(target_arch = "aarch64", target_endian = "little"),
            all(target_arch = "arm", target_endian = "little"),
            target_arch = "x86_64",
            target_arch = "x86"
        ))]
        DynKey::Simd(c) => seal_strided(c, aad, in_out, ctr, tag_iv),

        DynKey::Fallback(c) => seal_strided(c, aad, in_out, ctr, tag_iv),
    }
}

#[cfg_attr(
    any(
        all(target_arch = "aarch64", target_endian = "little"),
        all(target_arch = "arm", target_endian = "little"),
        target_arch = "x86",
        target_arch = "x86_64"
    ),
    inline(never)
)]
#[cfg_attr(
    any(
        all(target_arch = "aarch64", target_endian = "little"),
        target_arch = "x86_64"
    ),
    cold
)]
fn seal_strided<
    A: aes::EncryptBlock + aes::EncryptCtr32,
    G: gcm::UpdateBlock + gcm::UpdateBlocks,
>(
    Combo { aes_key, gcm_key }: &Combo<A, G>,
    aad: Aad<&[u8]>,
    in_out: &mut [u8],
    mut ctr: Counter,
    tag_iv: aes::Iv,
) -> Result<Tag, error::Unspecified> {
    let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?;

    let (mut whole, remainder) = slice::as_chunks_mut(in_out);

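    // Encrypt each chunk and then feed the resulting ciphertext to GHASH;
    // GCM authenticates the ciphertext, not the plaintext.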
    for mut chunk in whole.chunks_mut::<CHUNK_BLOCKS>() {
        aes_key.ctr32_encrypt_within(chunk.as_flattened_mut().into(), &mut ctr);
        auth.update_blocks(chunk.as_ref());
    }

    let remainder = OverlappingPartialBlock::new(remainder.into())
        .unwrap_or_else(|InputTooLongError { .. }| unreachable!());
    seal_finish(aes_key, auth, remainder, ctr, tag_iv)
}

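// Encrypts any final partial block and folds it into the GHASH state. GHASH
// consumes the ciphertext zero-padded to a full block, which is why the bytes
// past `remainder_len` are cleared before `update_block`.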
fn seal_finish<A: aes::EncryptBlock, G: gcm::UpdateBlock>(
    aes_key: &A,
    mut auth: gcm::Context<G>,
    remainder: OverlappingPartialBlock<'_>,
    ctr: Counter,
    tag_iv: aes::Iv,
) -> Result<Tag, error::Unspecified> {
    let remainder_len = remainder.len();
    if remainder_len > 0 {
        let mut input = ZERO_BLOCK;
        overwrite_at_start(&mut input, remainder.input());
        let mut output = aes_key.encrypt_iv_xor_block(ctr.into(), input);
        output[remainder_len..].fill(0);
        auth.update_block(output);
        remainder.overwrite_at_start(output);
    }

    Ok(finish(aes_key, auth, tag_iv))
}

#[inline(never)]
pub(super) fn open(
    Key(key): &Key,
    nonce: Nonce,
    aad: Aad<&[u8]>,
    in_out_slice: &mut [u8],
    src: RangeFrom<usize>,
) -> Result<Tag, error::Unspecified> {
    #[cfg(any(
        all(target_arch = "aarch64", target_endian = "little"),
        target_arch = "x86_64"
    ))]
    let in_out = Overlapping::new(in_out_slice, src.clone()).map_err(error::erase::<IndexError>)?;

    let mut ctr = Counter::one(nonce);
    let tag_iv = ctr.increment();

    match key {
        #[cfg(target_arch = "x86_64")]
        DynKey::AesHwClMulAvxMovbe(Combo { aes_key, gcm_key }) => {
            use crate::c;

            prefixed_extern! {
                // `HTable` and `Xi` should be 128-bit aligned. TODO: Can we shrink `HTable`? The
                // assembly says it needs just nine values in that array.
                fn aesni_gcm_decrypt(
                    input: *const u8,
                    output: *mut u8,
                    len: c::size_t,
                    key: &aes::AES_KEY,
                    ivec: &mut Counter,
                    Htable: &gcm::HTable,
                    Xi: &mut gcm::Xi) -> c::size_t;
            }

            let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?;
            let processed = in_out.with_input_output_len(|input, output, len| {
                let (htable, xi) = auth.inner();
                unsafe {
                    aesni_gcm_decrypt(
                        input,
                        output,
                        len,
                        aes_key.inner_less_safe(),
                        &mut ctr,
                        htable,
                        xi,
                    )
                }
            });
            let in_out_slice = in_out_slice.get_mut(processed..).unwrap_or_else(|| {
                // This can't happen. If it did, then the assembly already
                // caused a buffer overflow.
                unreachable!()
            });
            // Authenticate any remaining whole blocks.
            let in_out = Overlapping::new(in_out_slice, src.clone()).unwrap_or_else(
                |IndexError { .. }| {
                    // This can't happen. If it did, then the assembly already
                    // overwrote part of the remaining input.
                    unreachable!()
                },
            );
            let (whole, _) = slice::as_chunks(in_out.input());
            auth.update_blocks(whole);

            let whole_len = whole.as_flattened().len();

            // Decrypt any remaining whole blocks.
            let whole = Overlapping::new(&mut in_out_slice[..(src.start + whole_len)], src.clone())
                .map_err(error::erase::<IndexError>)?;
            aes_key.ctr32_encrypt_within(whole, &mut ctr);

            let in_out_slice = match in_out_slice.get_mut(whole_len..) {
                Some(partial) => partial,
                None => unreachable!(),
            };
            let in_out = Overlapping::new(in_out_slice, src)
                .unwrap_or_else(|IndexError { .. }| unreachable!());
            let in_out = OverlappingPartialBlock::new(in_out)
                .unwrap_or_else(|InputTooLongError { .. }| unreachable!());
            open_finish(aes_key, auth, in_out, ctr, tag_iv)
        }

        #[cfg(all(target_arch = "aarch64", target_endian = "little"))]
        DynKey::AesHwClMul(Combo { aes_key, gcm_key }) => {
            use crate::bits::BitLength;

            let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?;
            let remainder_len = in_out.len() % BLOCK_LEN;
            let whole_len = in_out.len() - remainder_len;
            in_out.with_input_output_len(|input, output, _len| {
                let whole_block_bits = auth.in_out_whole_block_bits();
                let whole_block_bits_u64: BitLength<u64> = whole_block_bits.into();
                if let Ok(whole_block_bits) = whole_block_bits_u64.try_into() {
                    use core::num::NonZeroU64;

                    let (htable, xi) = auth.inner();
                    prefixed_extern! {
                        fn aes_gcm_dec_kernel(
                            input: *const u8,
                            in_bits: BitLength<NonZeroU64>,
                            output: *mut u8,
                            Xi: &mut gcm::Xi,
                            ivec: &mut Counter,
                            key: &aes::AES_KEY,
                            Htable: &gcm::HTable);
                    }

                    unsafe {
                        aes_gcm_dec_kernel(
                            input,
                            whole_block_bits,
                            output,
                            xi,
                            &mut ctr,
                            aes_key.inner_less_safe(),
                            htable,
                        )
                    }
                }
            });
            let remainder = &mut in_out_slice[whole_len..];
            let remainder =
                Overlapping::new(remainder, src).unwrap_or_else(|IndexError { .. }| unreachable!());
            let remainder = OverlappingPartialBlock::new(remainder)
                .unwrap_or_else(|InputTooLongError { .. }| unreachable!());
            open_finish(aes_key, auth, remainder, ctr, tag_iv)
        }

        #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
        DynKey::AesHwClMul(c) => open_strided(c, aad, in_out_slice, src, ctr, tag_iv),

        #[cfg(any(
            all(target_arch = "aarch64", target_endian = "little"),
            all(target_arch = "arm", target_endian = "little"),
            target_arch = "x86_64",
            target_arch = "x86"
        ))]
        DynKey::Simd(c) => open_strided(c, aad, in_out_slice, src, ctr, tag_iv),

        DynKey::Fallback(c) => open_strided(c, aad, in_out_slice, src, ctr, tag_iv),
    }
}

#[cfg_attr(
    any(
        all(
            any(
                all(target_arch = "aarch64", target_endian = "little"),
                all(target_arch = "arm", target_endian = "little")
            ),
            target_feature = "neon"
        ),
        all(
            any(target_arch = "x86", target_arch = "x86_64"),
            target_feature = "sse"
        )
    ),
    inline(never)
)]
#[cfg_attr(
    any(
        all(target_arch = "aarch64", target_endian = "little"),
        target_arch = "x86_64"
    ),
    cold
)]
fn open_strided<
    A: aes::EncryptBlock + aes::EncryptCtr32,
    G: gcm::UpdateBlock + gcm::UpdateBlocks,
>(
    Combo { aes_key, gcm_key }: &Combo<A, G>,
    aad: Aad<&[u8]>,
    in_out_slice: &mut [u8],
    src: RangeFrom<usize>,
    mut ctr: Counter,
    tag_iv: aes::Iv,
) -> Result<Tag, error::Unspecified> {
    let in_out = Overlapping::new(in_out_slice, src.clone()).map_err(error::erase::<IndexError>)?;
    let input = in_out.input();
    let input_len = input.len();

    let mut auth = gcm::Context::new(gcm_key, aad, input_len)?;

    let remainder_len = input_len % BLOCK_LEN;
    let whole_len = input_len - remainder_len;
    let in_prefix_len = src.start;
    {
        let mut chunk_len = CHUNK_BLOCKS * BLOCK_LEN;
        let mut output = 0;
        let mut input = in_prefix_len;
        loop {
            if whole_len - output < chunk_len {
                chunk_len = whole_len - output;
            }

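            // Hash this chunk of ciphertext before decrypting it in place;
            // decryption slides the data `in_prefix_len` bytes toward the
            // front of the buffer, overwriting the ciphertext.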
            let ciphertext = &in_out_slice[input..][..chunk_len];
            let (ciphertext, leftover) = slice::as_chunks(ciphertext);
            debug_assert_eq!(leftover.len(), 0);
            if ciphertext.is_empty() {
                break;
            }
            auth.update_blocks(ciphertext);

            let chunk = Overlapping::new(
                &mut in_out_slice[output..][..(chunk_len + in_prefix_len)],
                in_prefix_len..,
            )
            .map_err(error::erase::<IndexError>)?;
            aes_key.ctr32_encrypt_within(chunk, &mut ctr);
            output += chunk_len;
            input += chunk_len;
        }
    }

    let in_out = Overlapping::new(&mut in_out_slice[whole_len..], src)
        .unwrap_or_else(|IndexError { .. }| unreachable!());
    let in_out = OverlappingPartialBlock::new(in_out)
        .unwrap_or_else(|InputTooLongError { .. }| unreachable!());

    open_finish(aes_key, auth, in_out, ctr, tag_iv)
}

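// Decryption counterpart of `seal_finish`. GHASH consumes the (zero-padded)
// ciphertext, so the final partial block is hashed before it is decrypted.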
fn open_finish<A: aes::EncryptBlock, G: gcm::UpdateBlock>(
    aes_key: &A,
    mut auth: gcm::Context<G>,
    remainder: OverlappingPartialBlock<'_>,
    ctr: Counter,
    tag_iv: aes::Iv,
) -> Result<Tag, error::Unspecified> {
    if remainder.len() > 0 {
        let mut input = ZERO_BLOCK;
        overwrite_at_start(&mut input, remainder.input());
        auth.update_block(input);
        remainder.overwrite_at_start(aes_key.encrypt_iv_xor_block(ctr.into(), input));
    }
    Ok(finish(aes_key, auth, tag_iv))
}

fn finish<A: aes::EncryptBlock, G: gcm::UpdateBlock>(
    aes_key: &A,
    gcm_ctx: gcm::Context<G>,
    tag_iv: aes::Iv,
) -> Tag {
    // Finalize the tag and return it.
    gcm_ctx.pre_finish(|pre_tag| Tag(aes_key.encrypt_iv_xor_block(tag_iv, pre_tag)))
}

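// Two of the 2^32 counter values per nonce are unavailable for encrypting
// plaintext (the `2` passed as `overhead_blocks_per_nonce`), leaving
// (2^32 - 2) blocks * 16 bytes = 2^36 - 32 = (2^39 - 256)/8 bytes.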
pub(super) const MAX_IN_OUT_LEN: usize = super::max_input_len(BLOCK_LEN, 2);

// [NIST SP800-38D] Section 5.2.1.1. Note that [RFC 5116 Section 5.1] and
// [RFC 5116 Section 5.2] have an off-by-one error in `P_MAX`.
//
// [NIST SP800-38D]:
// http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
// [RFC 5116 Section 5.1]: https://tools.ietf.org/html/rfc5116#section-5.1
// [RFC 5116 Section 5.2]: https://tools.ietf.org/html/rfc5116#section-5.2
const _MAX_INPUT_LEN_BOUNDED_BY_NIST: () =
    assert!(MAX_IN_OUT_LEN == usize_from_u64_saturated(((1u64 << 39) - 256) / 8));

#[derive(Copy, Clone)]
pub(super) struct Combo<Aes, Gcm> {
    pub(super) aes_key: Aes,
    pub(super) gcm_key: Gcm,
}