// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

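	/* only AES-128 is handled here; other key sizes are rejected and
	 * left to the fallback implementation (CRYPTO_ALG_NEED_FALLBACK) */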
	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

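	/* an RFC 4309 key is the AES key proper followed by a 3-byte
	 * nonce (salt) */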
	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
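	/* CCM permits any even tag length from 4 to 16 bytes (RFC 3610) */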
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
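	/* RFC 4309 further restricts the ICV to 8, 12 or 16 bytes */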
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
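	/* write msglen big-endian into the last csize (= L) bytes of the
	 * block, i.e. the length field at the tail of B0; msglen is only
	 * 32 bits, so at most the low four bytes are written and a wider
	 * field keeps its zeroed high-order bytes */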
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;

	memcpy(b0, iv, 16);

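	/* per the RFC 3610 flags-byte layout, iv[0]'s low three bits
	 * already carry L' = L - 1; the remaining flag bits are OR'd
	 * in below */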
	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	return set_msg_len(b0 + 16 - l, cryptlen, l);
}

static int generate_pat(u8 *iv,
			struct aead_request *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int authsize,
			unsigned int nbytes,
			unsigned int assoclen,
			u8 *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32 - 1.
	 */

	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do one CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}
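
	/* iauth_len is the share of the AAD that fits into B1 alongside its
	 * length header: 16 - 2 = 14 bytes behind a two-byte length field,
	 * 16 - 6 = 10 bytes behind the 0xfffe escape plus a four-byte length
	 * (RFC 3610) */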

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			memcpy_from_sglist(b1 + 2, req->src, 0, iauth_len);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			memcpy_from_sglist(b1 + 6, req->src, 0, iauth_len);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;
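		/* the first iauth_len bytes of AAD were already consumed
		 * into B1 above, so the walk below starts past them */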

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

static int ccm_nx_decrypt(struct aead_request *req,
			  u8 *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

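	/* on decrypt, req->cryptlen covers ciphertext plus tag; trim to
	 * the ciphertext proper before walking the data */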
	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	memcpy_from_sglist(priv->oauth_tag, req->src, nbytes + req->assoclen,
			   authsize);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {

		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm_nx_encrypt(struct aead_request *req,
			  u8 *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

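	/* seed in_pat_or_b0 with the authentication run over B0/B1 and the
	 * AAD; the main pass below then folds in the payload blocks */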
	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	memcpy_to_sglist(req->dst, nbytes + req->assoclen,
			 csbcpb->cpb.aes_ccm.out_pat_or_mac, authsize);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

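	/* build the 16-byte B0 IV per RFC 4309: flags L' = 3 (four-byte
	 * length field) plus an 11-byte nonce made of the 3-byte salt from
	 * setkey and the 8-byte per-request IV */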
	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, req->iv, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

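	/* same B0 construction as the encrypt path above (RFC 4309) */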
	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, req->iv, req->assoclen);
}

struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};
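
/*
 * Example usage (a sketch, not part of this driver): once these algs are
 * registered, callers reach them through the generic AEAD API.  The key
 * and tag sizes below are illustrative assumptions.
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, 16);	// AES-128 only, per set_key
 *	crypto_aead_setauthsize(tfm, 16);	// any even size 4..16
 *	// then aead_request_alloc() / aead_request_set_crypt() /
 *	// crypto_aead_encrypt() as with any other AEAD transform
 *
 * Whether this driver or another "ccm(aes)" implementation is selected
 * depends on priority (cra_priority = 300 here) and availability.
 */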