// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2021 Aspeed Technology Inc.
 */

#include "aspeed-hace.h"
#include <crypto/des.h>
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
#define CIPHER_DBG(h, fmt, ...)	\
	dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define CIPHER_DBG(h, fmt, ...)	\
	dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif

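/*
 * Run the request through the software fallback tfm, preserving the
 * caller's completion callback and request flags.
 */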
static int aspeed_crypto_do_fallback(struct skcipher_request *areq)
{
	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);

	if (rctx->enc_cmd & HACE_CMD_ENCRYPT)
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_decrypt(&rctx->fallback_req);

	return err;
}

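/*
 * Zero-length requests and requests that are not block-aligned cannot be
 * handled by the hardware, so they must go to the software fallback.
 */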
static bool aspeed_crypto_need_fallback(struct skcipher_request *areq)
{
	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);

	if (areq->cryptlen == 0)
		return true;

	if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) &&
	    !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE))
		return true;

	if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) &&
	    !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE))
		return true;

	return false;
}

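/*
 * Queue the request on the crypto engine, or handle it synchronously via
 * the software fallback when the AST2500 hardware cannot process it.
 */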
static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev,
					   struct skcipher_request *req)
{
	if (hace_dev->version == AST2500_VERSION &&
	    aspeed_crypto_need_fallback(req)) {
		CIPHER_DBG(hace_dev, "SW fallback\n");
		return aspeed_crypto_do_fallback(req);
	}

	return crypto_transfer_skcipher_request_to_engine(
			hace_dev->crypt_engine_crypto, req);
}

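/* crypto_engine callback: kick off one queued skcipher request. */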
static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
	struct aspeed_engine_crypto *crypto_engine;
	int rc;

	crypto_engine = &hace_dev->crypto_engine;
	crypto_engine->req = req;
	crypto_engine->flags |= CRYPTO_FLAGS_BUSY;

	rc = ctx->start(hace_dev);

	if (rc != -EINPROGRESS)
		return -EIO;

	return 0;
}

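/*
 * Completion path: copy the updated IV back out of the saved hardware
 * context and hand the finished request back to the crypto engine.
 */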
static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err)
{
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct aspeed_cipher_reqctx *rctx;
	struct skcipher_request *req;

	CIPHER_DBG(hace_dev, "\n");

	req = crypto_engine->req;
	rctx = skcipher_request_ctx(req);

	if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
		if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
			memcpy(req->iv, crypto_engine->cipher_ctx +
			       DES_BLOCK_SIZE, DES_BLOCK_SIZE);
		else
			memcpy(req->iv, crypto_engine->cipher_ctx,
			       AES_BLOCK_SIZE);
	}

	crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY;

	crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req,
					 err);

	return err;
}

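/*
 * Resume handler for the scatter-gather (AST2600) path: unmap the DMA
 * buffers and complete the request.
 */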
static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct device *dev = hace_dev->dev;
	struct aspeed_cipher_reqctx *rctx;
	struct skcipher_request *req;

	CIPHER_DBG(hace_dev, "\n");

	req = crypto_engine->req;
	rctx = skcipher_request_ctx(req);

	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE);
	}

	return aspeed_sk_complete(hace_dev, 0);
}

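/*
 * Resume handler for the bounce-buffer (AST2500) path: copy the result
 * out of the DMA buffer into the destination scatterlist.
 */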
static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct aspeed_cipher_reqctx *rctx;
	struct skcipher_request *req;
	struct scatterlist *out_sg;
	int nbytes = 0;
	int rc = 0;

	req = crypto_engine->req;
	rctx = skcipher_request_ctx(req);
	out_sg = req->dst;

	/* Copy output buffer to dst scatter-gather lists */
	nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents,
				     crypto_engine->cipher_addr, req->cryptlen);
	if (!nbytes) {
		dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
			 "nbytes", nbytes, "cryptlen", req->cryptlen);
		rc = -EINVAL;
	}

	CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
		   "nbytes", nbytes, "req->cryptlen", req->cryptlen,
		   "nb_out_sg", rctx->dst_nents,
		   "cipher addr", crypto_engine->cipher_addr);

	return aspeed_sk_complete(hace_dev, rc);
}

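/*
 * Bounce-buffer start path (AST2500): stage the source data in the DMA
 * buffer, then program the engine to process it in place.
 */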
static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct aspeed_cipher_reqctx *rctx;
	struct skcipher_request *req;
	struct scatterlist *in_sg;
	int nbytes;

	req = crypto_engine->req;
	rctx = skcipher_request_ctx(req);
	in_sg = req->src;

	nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents,
				   crypto_engine->cipher_addr, req->cryptlen);

	CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
		   "nbytes", nbytes, "req->cryptlen", req->cryptlen,
		   "nb_in_sg", rctx->src_nents,
		   "cipher addr", crypto_engine->cipher_addr);

	if (!nbytes) {
		dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
			 "nbytes", nbytes, "cryptlen", req->cryptlen);
		return -EINVAL;
	}

	crypto_engine->resume = aspeed_sk_transfer;

	/* Trigger engines */
	ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
		       ASPEED_HACE_SRC);
	ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
		       ASPEED_HACE_DEST);
	ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
	ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);

	return -EINPROGRESS;
}

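/*
 * Scatter-gather start path (AST2600): DMA-map the request, build the
 * hardware source/destination descriptor lists (bit 31 of the length
 * field marks the final entry), then trigger the engine.
 */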
static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct aspeed_sg_list *src_list, *dst_list;
	dma_addr_t src_dma_addr, dst_dma_addr;
	struct aspeed_cipher_reqctx *rctx;
	struct skcipher_request *req;
	struct scatterlist *s;
	int src_sg_len;
	int dst_sg_len;
	int total, i;
	int rc;

	CIPHER_DBG(hace_dev, "\n");

	req = crypto_engine->req;
	rctx = skcipher_request_ctx(req);

	rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
			 HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;

	/* BIDIRECTIONAL */
	if (req->dst == req->src) {
		src_sg_len = dma_map_sg(hace_dev->dev, req->src,
					rctx->src_nents, DMA_BIDIRECTIONAL);
		dst_sg_len = src_sg_len;
		if (!src_sg_len) {
			dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
			return -EINVAL;
		}

	} else {
		src_sg_len = dma_map_sg(hace_dev->dev, req->src,
					rctx->src_nents, DMA_TO_DEVICE);
		if (!src_sg_len) {
			dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
			return -EINVAL;
		}

		dst_sg_len = dma_map_sg(hace_dev->dev, req->dst,
					rctx->dst_nents, DMA_FROM_DEVICE);
		if (!dst_sg_len) {
			dev_warn(hace_dev->dev, "dma_map_sg() dst error\n");
			rc = -EINVAL;
			goto free_req_src;
		}
	}

	src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr;
	src_dma_addr = crypto_engine->cipher_dma_addr;
	total = req->cryptlen;

	for_each_sg(req->src, s, src_sg_len, i) {
		u32 phy_addr = sg_dma_address(s);
		u32 len = sg_dma_len(s);

		if (total > len) {
			total -= len;
		} else {
			/* last sg list */
			len = total;
			len |= BIT(31);
			total = 0;
		}

		src_list[i].phy_addr = cpu_to_le32(phy_addr);
		src_list[i].len = cpu_to_le32(len);
	}

	if (total != 0) {
		rc = -EINVAL;
		goto free_req;
	}

	if (req->dst == req->src) {
		dst_list = src_list;
		dst_dma_addr = src_dma_addr;

	} else {
		dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr;
		dst_dma_addr = crypto_engine->dst_sg_dma_addr;
		total = req->cryptlen;

		for_each_sg(req->dst, s, dst_sg_len, i) {
			u32 phy_addr = sg_dma_address(s);
			u32 len = sg_dma_len(s);

			if (total > len) {
				total -= len;
			} else {
				/* last sg list */
				len = total;
				len |= BIT(31);
				total = 0;
			}

			dst_list[i].phy_addr = cpu_to_le32(phy_addr);
			dst_list[i].len = cpu_to_le32(len);
		}

		dst_list[dst_sg_len].phy_addr = 0;
		dst_list[dst_sg_len].len = 0;
	}

	if (total != 0) {
		rc = -EINVAL;
		goto free_req;
	}

	crypto_engine->resume = aspeed_sk_transfer_sg;

	/* Memory barrier to ensure all data setup before engine starts */
	mb();

	/* Trigger engines */
	ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
	ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
	ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
	ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);

	return -EINPROGRESS;

free_req:
	if (req->dst == req->src) {
		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
			     DMA_BIDIRECTIONAL);

	} else {
		dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
			     DMA_TO_DEVICE);
	}

	return rc;

free_req_src:
	dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);

	return rc;
}

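/*
 * ctx->start hook: load the saved context (IV + key) for the engine and
 * dispatch to the scatter-gather or bounce-buffer start path depending
 * on the hardware generation.
 */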
static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
{
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct aspeed_cipher_reqctx *rctx;
	struct crypto_skcipher *cipher;
	struct aspeed_cipher_ctx *ctx;
	struct skcipher_request *req;

	CIPHER_DBG(hace_dev, "\n");

	req = crypto_engine->req;
	rctx = skcipher_request_ctx(req);
	cipher = crypto_skcipher_reqtfm(req);
	ctx = crypto_skcipher_ctx(cipher);

	/* enable interrupt */
	rctx->enc_cmd |= HACE_CMD_ISR_EN;

	rctx->dst_nents = sg_nents(req->dst);
	rctx->src_nents = sg_nents(req->src);

	ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma,
		       ASPEED_HACE_CONTEXT);

	if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
		if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
			memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE,
			       req->iv, DES_BLOCK_SIZE);
		else
			memcpy(crypto_engine->cipher_ctx, req->iv,
			       AES_BLOCK_SIZE);
	}

	if (hace_dev->version == AST2600_VERSION) {
		memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len);

		return aspeed_sk_start_sg(hace_dev);
	}

	memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH);

	return aspeed_sk_start(hace_dev);
}

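/*
 * Common DES/3DES entry point: reject misaligned lengths for the block
 * modes, compose the HACE command word and queue the request.
 */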
static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd)
{
	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
	u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;

	CIPHER_DBG(hace_dev, "\n");

	if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
			return -EINVAL;
	}

	rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
			HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE |
			HACE_CMD_CONTEXT_SAVE_ENABLE;

	return aspeed_hace_crypto_handle_queue(hace_dev, req);
}

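/*
 * Validate and store a DES or 3DES key, and mirror it into the software
 * fallback tfm so both paths stay in sync.
 */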
static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
	int rc;

	CIPHER_DBG(hace_dev, "keylen: %d bytes\n", keylen);

	if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
		dev_warn(hace_dev->dev, "invalid keylen: %d bytes\n", keylen);
		return -EINVAL;
	}

	if (keylen == DES_KEY_SIZE) {
		rc = crypto_des_verify_key(tfm, key);
		if (rc)
			return rc;

	} else if (keylen == DES3_EDE_KEY_SIZE) {
		rc = crypto_des3_ede_verify_key(tfm, key);
		if (rc)
			return rc;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;

	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

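/* Per-mode DES/3DES encrypt/decrypt wrappers around aspeed_des_crypt(). */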
static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
				HACE_CMD_TRIPLE_DES);
}

static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
				HACE_CMD_TRIPLE_DES);
}

static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
				HACE_CMD_TRIPLE_DES);
}

static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
				HACE_CMD_TRIPLE_DES);
}

static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
				HACE_CMD_TRIPLE_DES);
}

static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
				HACE_CMD_TRIPLE_DES);
}

static int aspeed_des_ctr_decrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
				HACE_CMD_SINGLE_DES);
}

static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
				HACE_CMD_SINGLE_DES);
}

static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
				HACE_CMD_SINGLE_DES);
}

static int aspeed_des_cbc_encrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
				HACE_CMD_SINGLE_DES);
}

static int aspeed_des_ecb_decrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
				HACE_CMD_SINGLE_DES);
}

static int aspeed_des_ecb_encrypt(struct skcipher_request *req)
{
	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
				HACE_CMD_SINGLE_DES);
}

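/*
 * Common AES entry point: check block alignment for CBC/ECB, select the
 * key-size command bits and queue the request.
 */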
static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd)
{
	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
	u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;

	if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
		if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
			return -EINVAL;
	}

	CIPHER_DBG(hace_dev, "%s\n",
		   (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt");

	cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
	       HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cmd |= HACE_CMD_AES128;
		break;
	case AES_KEYSIZE_192:
		cmd |= HACE_CMD_AES192;
		break;
	case AES_KEYSIZE_256:
		cmd |= HACE_CMD_AES256;
		break;
	default:
		return -EINVAL;
	}

	rctx->enc_cmd = cmd;

	return aspeed_hace_crypto_handle_queue(hace_dev, req);
}

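/*
 * Store an AES key. On the AST2500 the key schedule is expanded in
 * software before being copied into the context; the AST2600 path uses
 * hardware key expansion and keeps the raw key.
 */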
static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
	struct crypto_aes_ctx gen_aes_key;

	CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	if (ctx->hace_dev->version == AST2500_VERSION) {
		aes_expandkey(&gen_aes_key, key, keylen);
		memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH);

	} else {
		memcpy(ctx->key, key, keylen);
	}

	ctx->key_len = keylen;

	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

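/* Per-mode AES encrypt/decrypt wrappers around aspeed_aes_crypt(). */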
static int aspeed_aes_ctr_decrypt(struct skcipher_request *req)
{
	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR);
}

static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
{
	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
}

static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
{
	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
}

static int aspeed_aes_cbc_encrypt(struct skcipher_request *req)
{
	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC);
}

static int aspeed_aes_ecb_decrypt(struct skcipher_request *req)
{
	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB);
}

static int aspeed_aes_ecb_encrypt(struct skcipher_request *req)
{
	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB);
}

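/*
 * tfm init: bind the hardware context, allocate the software fallback
 * tfm and size the request context to hold the fallback request.
 */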
static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
{
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct aspeed_hace_alg *crypto_alg;

	crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher.base);
	ctx->hace_dev = crypto_alg->hace_dev;
	ctx->start = aspeed_hace_skcipher_trigger;

	CIPHER_DBG(ctx->hace_dev, "%s\n", name);

	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback_tfm));

	return 0;
}

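/* tfm exit: release the software fallback tfm. */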
static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
{
	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;

	CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base));
	crypto_free_skcipher(ctx->fallback_tfm);
}

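/* skcipher algorithms supported by all HACE generations. */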
static struct aspeed_hace_alg aspeed_crypto_algs[] = {
	{
		.alg.skcipher.base = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aspeed_aes_setkey,
			.encrypt = aspeed_aes_ecb_encrypt,
			.decrypt = aspeed_aes_ecb_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "aspeed-ecb-aes",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
	{
		.alg.skcipher.base = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aspeed_aes_setkey,
			.encrypt = aspeed_aes_cbc_encrypt,
			.decrypt = aspeed_aes_cbc_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "aspeed-cbc-aes",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
	{
		.alg.skcipher.base = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = aspeed_des_setkey,
			.encrypt = aspeed_des_ecb_encrypt,
			.decrypt = aspeed_des_ecb_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "ecb(des)",
				.cra_driver_name = "aspeed-ecb-des",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = DES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
	{
		.alg.skcipher.base = {
			.ivsize = DES_BLOCK_SIZE,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = aspeed_des_setkey,
			.encrypt = aspeed_des_cbc_encrypt,
			.decrypt = aspeed_des_cbc_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "aspeed-cbc-des",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = DES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
	{
		.alg.skcipher.base = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.setkey = aspeed_des_setkey,
			.encrypt = aspeed_tdes_ecb_encrypt,
			.decrypt = aspeed_tdes_ecb_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "ecb(des3_ede)",
				.cra_driver_name = "aspeed-ecb-tdes",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = DES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
	{
		.alg.skcipher.base = {
			.ivsize = DES_BLOCK_SIZE,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.setkey = aspeed_des_setkey,
			.encrypt = aspeed_tdes_cbc_encrypt,
			.decrypt = aspeed_tdes_cbc_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "aspeed-cbc-tdes",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = DES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
};

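/* CTR-mode algorithms, registered on the AST2600 only. */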
static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
	{
		.alg.skcipher.base = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aspeed_aes_setkey,
			.encrypt = aspeed_aes_ctr_encrypt,
			.decrypt = aspeed_aes_ctr_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "aspeed-ctr-aes",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
	{
		.alg.skcipher.base = {
			.ivsize = DES_BLOCK_SIZE,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = aspeed_des_setkey,
			.encrypt = aspeed_des_ctr_encrypt,
			.decrypt = aspeed_des_ctr_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "ctr(des)",
				.cra_driver_name = "aspeed-ctr-des",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
	{
		.alg.skcipher.base = {
			.ivsize = DES_BLOCK_SIZE,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.setkey = aspeed_des_setkey,
			.encrypt = aspeed_tdes_ctr_encrypt,
			.decrypt = aspeed_tdes_ctr_decrypt,
			.init = aspeed_crypto_cra_init,
			.exit = aspeed_crypto_cra_exit,
			.base = {
				.cra_name = "ctr(des3_ede)",
				.cra_driver_name = "aspeed-ctr-tdes",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
				.cra_alignmask = 0x0f,
				.cra_module = THIS_MODULE,
			}
		},
		.alg.skcipher.op = {
			.do_one_request = aspeed_crypto_do_request,
		},
	},
};

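/* Unregister every skcipher algorithm registered for this device. */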
void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
		crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);

	if (hace_dev->version != AST2600_VERSION)
		return;

	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
		crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
}

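/*
 * Register the common algorithms, plus the AST2600-only CTR variants
 * when running on that generation.
 */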
void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
{
	int rc, i;

	CIPHER_DBG(hace_dev, "\n");

	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
		aspeed_crypto_algs[i].hace_dev = hace_dev;
		rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
		if (rc) {
			CIPHER_DBG(hace_dev, "Failed to register %s\n",
				   aspeed_crypto_algs[i].alg.skcipher.base.base.cra_name);
		}
	}

	if (hace_dev->version != AST2600_VERSION)
		return;

	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
		aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
		rc = crypto_engine_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
		if (rc) {
			CIPHER_DBG(hace_dev, "Failed to register %s\n",
				   aspeed_crypto_algs_g6[i].alg.skcipher.base.base.cra_name);
		}
	}
}