1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * AES CCM routines supporting the Power 7+ Nest Accelerators driver |
4 | * |
5 | * Copyright (C) 2012 International Business Machines Inc. |
6 | * |
7 | * Author: Kent Yoder <yoder1@us.ibm.com> |
8 | */ |
9 | |
10 | #include <crypto/internal/aead.h> |
11 | #include <crypto/aes.h> |
12 | #include <crypto/algapi.h> |
13 | #include <crypto/scatterwalk.h> |
14 | #include <linux/module.h> |
15 | #include <linux/types.h> |
16 | #include <linux/crypto.h> |
17 | #include <asm/vio.h> |
18 | |
19 | #include "nx_csbcpb.h" |
20 | #include "nx.h" |
21 | |
22 | |
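/*
 * Program the AES key into both coprocessor control blocks: the CCM
 * block (csbcpb) drives the combined encrypt + MAC operation, while the
 * CCA block (csbcpb_aead) is used to MAC large associated data. The NX
 * unit only accepts 128-bit keys here; other sizes are rejected with
 * -EINVAL.
 */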
23 | static int ccm_aes_nx_set_key(struct crypto_aead *tfm, |
24 | const u8 *in_key, |
25 | unsigned int key_len) |
26 | { |
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
28 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
29 | struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; |
30 | |
31 | nx_ctx_init(nx_ctx, HCOP_FC_AES); |
32 | |
33 | switch (key_len) { |
34 | case AES_KEYSIZE_128: |
35 | NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); |
36 | NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); |
37 | nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; |
38 | break; |
39 | default: |
40 | return -EINVAL; |
41 | } |
42 | |
43 | csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM; |
44 | memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len); |
45 | |
46 | csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA; |
47 | memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len); |
48 | |
	return 0;
}
52 | |
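/*
 * RFC 4309 key material carries a trailing 3-byte nonce salt: e.g. a
 * 19-byte blob is a 16-byte AES-128 key followed by the salt. The salt
 * is stashed in nx_ctx->priv.ccm.nonce and later prepended to the
 * 8-byte per-request IV.
 */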
53 | static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm, |
54 | const u8 *in_key, |
55 | unsigned int key_len) |
56 | { |
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
58 | |
59 | if (key_len < 3) |
60 | return -EINVAL; |
61 | |
62 | key_len -= 3; |
63 | |
64 | memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3); |
65 | |
66 | return ccm_aes_nx_set_key(tfm, in_key, key_len); |
67 | } |
68 | |
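/*
 * CCM (RFC 3610 / NIST SP 800-38C) only defines even tag lengths from
 * 4 to 16 bytes; RFC 4309 further restricts the ICV to 8, 12 or 16
 * bytes, which is why the two transforms validate authsize differently.
 */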
69 | static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm, |
70 | unsigned int authsize) |
71 | { |
72 | switch (authsize) { |
73 | case 4: |
74 | case 6: |
75 | case 8: |
76 | case 10: |
77 | case 12: |
78 | case 14: |
79 | case 16: |
80 | break; |
81 | default: |
82 | return -EINVAL; |
83 | } |
84 | |
85 | return 0; |
86 | } |
87 | |
88 | static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm, |
89 | unsigned int authsize) |
90 | { |
91 | switch (authsize) { |
92 | case 8: |
93 | case 12: |
94 | case 16: |
95 | break; |
96 | default: |
97 | return -EINVAL; |
98 | } |
99 | |
100 | return 0; |
101 | } |
102 | |
103 | /* taken from crypto/ccm.c */ |
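/* Encode msglen big-endian into the trailing csize bytes of B0 (the
 * l(m) length field), failing with -EOVERFLOW if the message is too
 * long for an L-byte field.
 */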
104 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
105 | { |
106 | __be32 data; |
107 | |
108 | memset(block, 0, csize); |
109 | block += csize; |
110 | |
111 | if (csize >= 4) |
112 | csize = 4; |
113 | else if (msglen > (unsigned int)(1 << (8 * csize))) |
114 | return -EOVERFLOW; |
115 | |
116 | data = cpu_to_be32(msglen); |
117 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); |
118 | |
119 | return 0; |
120 | } |
121 | |
122 | /* taken from crypto/ccm.c */ |
123 | static inline int crypto_ccm_check_iv(const u8 *iv) |
124 | { |
125 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ |
126 | if (1 > iv[0] || iv[0] > 7) |
127 | return -EINVAL; |
128 | |
129 | return 0; |
130 | } |
131 | |
132 | /* based on code from crypto/ccm.c */ |
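/*
 * Per RFC 3610, B0 is laid out as:
 *
 *	flags (1 byte) | nonce N (15 - L bytes) | l(m) (L bytes)
 *
 * where flags = 64 * Adata + 8 * ((M - 2) / 2) + (L - 1). The IV
 * already carries L' = L - 1 in byte 0 along with the nonce, so only
 * the tag-length bits, the Adata bit and the l(m) field are filled in
 * here. For example, with L' = 3, an 8-byte tag and non-empty AAD, the
 * flags byte becomes 0x40 | 0x18 | 0x03 = 0x5b.
 */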
133 | static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize, |
134 | unsigned int cryptlen, u8 *b0) |
135 | { |
136 | unsigned int l, lp, m = authsize; |
137 | |
138 | memcpy(b0, iv, 16); |
139 | |
140 | lp = b0[0]; |
141 | l = lp + 1; |
142 | |
143 | /* set m, bits 3-5 */ |
144 | *b0 |= (8 * ((m - 2) / 2)); |
145 | |
146 | /* set adata, bit 6, if associated data is used */ |
147 | if (assoclen) |
148 | *b0 |= 64; |
149 | |
	return set_msg_len(b0 + 16 - l, cryptlen, l);
151 | } |
152 | |
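/*
 * Compute the initial Partial Authentication Tag (PAT) over B0, B1 and
 * the associated data. Three layouts are used depending on the AAD
 * size: none (B0 alone, handled directly in the CCM CPB), up to 14
 * bytes (AAD inlined into B1 and MACed in a single pass), or larger
 * (B0/B1 go through the CCA coprocessor, with the remaining AAD walked
 * in chunks bounded by the sg list and databytelen limits).
 */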
153 | static int generate_pat(u8 *iv, |
154 | struct aead_request *req, |
155 | struct nx_crypto_ctx *nx_ctx, |
156 | unsigned int authsize, |
157 | unsigned int nbytes, |
158 | unsigned int assoclen, |
159 | u8 *out) |
160 | { |
161 | struct nx_sg *nx_insg = nx_ctx->in_sg; |
162 | struct nx_sg *nx_outsg = nx_ctx->out_sg; |
163 | unsigned int iauth_len = 0; |
164 | u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; |
165 | int rc; |
166 | unsigned int max_sg_len; |
167 | |
168 | /* zero the ctr value */ |
169 | memset(iv + 15 - iv[0], 0, iv[0] + 1); |
170 | |
	/* page 78 of nx_wb.pdf notes:
	 * RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32 - 1 bytes.
	 */
184 | |
185 | if (!assoclen) { |
186 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; |
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do one CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
191 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; |
192 | b1 = nx_ctx->priv.ccm.iauth_tag; |
193 | iauth_len = assoclen; |
194 | } else if (assoclen <= 65280) { |
195 | /* if associated data is less than (2^16 - 2^8), we construct |
196 | * B1 differently and feed in the associated data to a CCA |
197 | * operation */ |
198 | b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; |
199 | b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; |
200 | iauth_len = 14; |
201 | } else { |
202 | b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; |
203 | b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; |
204 | iauth_len = 10; |
205 | } |
206 | |
207 | /* generate B0 */ |
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
209 | if (rc) |
210 | return rc; |
211 | |
212 | /* generate B1: |
213 | * add control info for associated data |
214 | * RFC 3610 and NIST Special Publication 800-38C |
215 | */ |
216 | if (b1) { |
217 | memset(b1, 0, 16); |
218 | if (assoclen <= 65280) { |
219 | *(u16 *)b1 = assoclen; |
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
222 | } else { |
223 | *(u16 *)b1 = (u16)(0xfffe); |
224 | *(u32 *)&b1[2] = assoclen; |
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
227 | } |
228 | } |
229 | |
230 | /* now copy any remaining AAD to scatterlist and call nx... */ |
231 | if (!assoclen) { |
232 | return rc; |
233 | } else if (assoclen <= 14) { |
234 | unsigned int len = 16; |
235 | |
236 | nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen); |
237 | |
238 | if (len != 16) |
239 | return -EINVAL; |
240 | |
241 | nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len, |
242 | nx_ctx->ap->sglen); |
243 | |
244 | if (len != 16) |
245 | return -EINVAL; |
246 | |
		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
249 | nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * |
250 | sizeof(struct nx_sg); |
251 | nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * |
252 | sizeof(struct nx_sg); |
253 | |
254 | NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; |
255 | NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; |
256 | |
257 | result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; |
258 | |
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
261 | if (rc) |
262 | return rc; |
263 | |
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
266 | |
267 | } else { |
268 | unsigned int processed = 0, to_process; |
269 | |
270 | processed += iauth_len; |
271 | |
272 | /* page_limit: number of sg entries that fit on one page */ |
273 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, |
274 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); |
275 | max_sg_len = min_t(u64, max_sg_len, |
276 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); |
277 | |
278 | do { |
279 | to_process = min_t(u32, assoclen - processed, |
280 | nx_ctx->ap->databytelen); |
281 | |
282 | nx_insg = nx_walk_and_build(nx_ctx->in_sg, |
283 | nx_ctx->ap->sglen, |
284 | req->src, processed, |
285 | &to_process); |
286 | |
287 | if ((to_process + processed) < assoclen) { |
288 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= |
289 | NX_FDM_INTERMEDIATE; |
290 | } else { |
291 | NX_CPB_FDM(nx_ctx->csbcpb_aead) &= |
292 | ~NX_FDM_INTERMEDIATE; |
293 | } |
294 | |
296 | nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * |
297 | sizeof(struct nx_sg); |
298 | |
299 | result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; |
300 | |
			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
303 | if (rc) |
304 | return rc; |
305 | |
306 | memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0, |
307 | nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0, |
308 | AES_BLOCK_SIZE); |
309 | |
310 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; |
311 | |
			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
314 | |
315 | processed += to_process; |
316 | } while (processed < assoclen); |
317 | |
318 | result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; |
319 | } |
320 | |
321 | memcpy(out, result, AES_BLOCK_SIZE); |
322 | |
323 | return rc; |
324 | } |
325 | |
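/*
 * Decrypt path: peel the received tag off the tail of req->src, seed
 * the PAT via generate_pat(), then walk the ciphertext in chunks,
 * chaining state (counter, PAT, S0) between NX calls with the
 * INTERMEDIATE/CONTINUATION flags, and finally compare the computed
 * MAC against the received one in constant time.
 */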
326 | static int ccm_nx_decrypt(struct aead_request *req, |
327 | u8 *iv, |
328 | unsigned int assoclen) |
329 | { |
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
331 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
332 | unsigned int nbytes = req->cryptlen; |
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
334 | struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; |
335 | unsigned long irq_flags; |
336 | unsigned int processed = 0, to_process; |
337 | int rc = -1; |
338 | |
339 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
340 | |
341 | nbytes -= authsize; |
342 | |
343 | /* copy out the auth tag to compare with later */ |
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);
347 | |
	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
350 | if (rc) |
351 | goto out; |
352 | |
353 | do { |
355 | /* to_process: the AES_BLOCK_SIZE data chunk to process in this |
356 | * update. This value is bound by sg list limits. |
357 | */ |
358 | to_process = nbytes - processed; |
359 | |
360 | if ((to_process + processed) < nbytes) |
361 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
362 | else |
363 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; |
364 | |
365 | NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; |
366 | |
		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
370 | if (rc) |
371 | goto out; |
372 | |
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
375 | if (rc) |
376 | goto out; |
377 | |
378 | /* for partial completion, copy following for next |
379 | * entry into loop... |
380 | */ |
381 | memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); |
382 | memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, |
383 | csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); |
384 | memcpy(csbcpb->cpb.aes_ccm.in_s0, |
385 | csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); |
386 | |
387 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; |
388 | |
389 | /* update stats */ |
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));
393 | |
394 | processed += to_process; |
395 | } while (processed < nbytes); |
396 | |
	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
399 | out: |
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
401 | return rc; |
402 | } |
403 | |
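/*
 * Encrypt path: same chunked walk as the decrypt side, but with
 * ENDE_ENCRYPT set; the final MAC is appended to req->dst as the tag.
 */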
404 | static int ccm_nx_encrypt(struct aead_request *req, |
405 | u8 *iv, |
406 | unsigned int assoclen) |
407 | { |
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
409 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
410 | unsigned int nbytes = req->cryptlen; |
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
412 | unsigned long irq_flags; |
413 | unsigned int processed = 0, to_process; |
414 | int rc = -1; |
415 | |
416 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
417 | |
	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
420 | if (rc) |
421 | goto out; |
422 | |
423 | do { |
424 | /* to process: the AES_BLOCK_SIZE data chunk to process in this |
425 | * update. This value is bound by sg list limits. |
426 | */ |
427 | to_process = nbytes - processed; |
428 | |
429 | if ((to_process + processed) < nbytes) |
430 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
431 | else |
432 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; |
433 | |
434 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; |
435 | |
		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
439 | if (rc) |
440 | goto out; |
441 | |
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
444 | if (rc) |
445 | goto out; |
446 | |
447 | /* for partial completion, copy following for next |
448 | * entry into loop... |
449 | */ |
450 | memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); |
451 | memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, |
452 | csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); |
453 | memcpy(csbcpb->cpb.aes_ccm.in_s0, |
454 | csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); |
455 | |
456 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; |
457 | |
458 | /* update stats */ |
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));
462 | |
463 | processed += to_process; |
464 | |
465 | } while (processed < nbytes); |
466 | |
467 | /* copy out the auth tag */ |
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);
471 | |
472 | out: |
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
474 | return rc; |
475 | } |
476 | |
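/*
 * RFC 4309 forms the 11-byte CCM nonce from the 3-byte key salt plus
 * the 8-byte per-request IV, with iv[0] = 3 selecting L' = 3 (L = 4).
 * The 8 IV bytes are included in req->assoclen by the AEAD API's
 * rfc4309 convention, hence the assoclen - 8 passed down.
 */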
477 | static int ccm4309_aes_nx_encrypt(struct aead_request *req) |
478 | { |
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
480 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); |
481 | u8 *iv = rctx->iv; |
482 | |
483 | iv[0] = 3; |
484 | memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); |
485 | memcpy(iv + 4, req->iv, 8); |
486 | |
	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
488 | } |
489 | |
490 | static int ccm_aes_nx_encrypt(struct aead_request *req) |
491 | { |
492 | int rc; |
493 | |
	rc = crypto_ccm_check_iv(req->iv);
495 | if (rc) |
496 | return rc; |
497 | |
	return ccm_nx_encrypt(req, req->iv, req->assoclen);
499 | } |
500 | |
501 | static int ccm4309_aes_nx_decrypt(struct aead_request *req) |
502 | { |
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
504 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); |
505 | u8 *iv = rctx->iv; |
506 | |
507 | iv[0] = 3; |
508 | memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); |
509 | memcpy(iv + 4, req->iv, 8); |
510 | |
	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
512 | } |
513 | |
514 | static int ccm_aes_nx_decrypt(struct aead_request *req) |
515 | { |
516 | int rc; |
517 | |
	rc = crypto_ccm_check_iv(req->iv);
519 | if (rc) |
520 | return rc; |
521 | |
	return ccm_nx_decrypt(req, req->iv, req->assoclen);
523 | } |
524 | |
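/*
 * Both transforms set CRYPTO_ALG_NEED_FALLBACK since this driver only
 * handles 128-bit keys (and, for rfc4309, a subset of tag lengths);
 * cra_blocksize is 1 because CCM behaves as a stream mode at the API
 * level.
 */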
525 | struct aead_alg nx_ccm_aes_alg = { |
526 | .base = { |
		.cra_name = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
529 | .cra_priority = 300, |
530 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
531 | .cra_blocksize = 1, |
532 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
533 | .cra_module = THIS_MODULE, |
534 | }, |
535 | .init = nx_crypto_ctx_aes_ccm_init, |
536 | .exit = nx_crypto_ctx_aead_exit, |
537 | .ivsize = AES_BLOCK_SIZE, |
538 | .maxauthsize = AES_BLOCK_SIZE, |
539 | .setkey = ccm_aes_nx_set_key, |
540 | .setauthsize = ccm_aes_nx_setauthsize, |
541 | .encrypt = ccm_aes_nx_encrypt, |
542 | .decrypt = ccm_aes_nx_decrypt, |
543 | }; |
544 | |
545 | struct aead_alg nx_ccm4309_aes_alg = { |
546 | .base = { |
		.cra_name = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
549 | .cra_priority = 300, |
550 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
551 | .cra_blocksize = 1, |
552 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
553 | .cra_module = THIS_MODULE, |
554 | }, |
555 | .init = nx_crypto_ctx_aes_ccm_init, |
556 | .exit = nx_crypto_ctx_aead_exit, |
557 | .ivsize = 8, |
558 | .maxauthsize = AES_BLOCK_SIZE, |
559 | .setkey = ccm4309_aes_nx_set_key, |
560 | .setauthsize = ccm4309_aes_nx_setauthsize, |
561 | .encrypt = ccm4309_aes_nx_encrypt, |
562 | .decrypt = ccm4309_aes_nx_decrypt, |
563 | }; |
564 | |