// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2021 Aspeed Technology Inc.
 */
#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/count_zeros.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string.h>

#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define ACRY_DBG(d, fmt, ...)	\
	dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define ACRY_DBG(d, fmt, ...)	\
	dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif

/*****************************
 *                           *
 * ACRY register definitions *
 *                           *
 *****************************/
#define ASPEED_ACRY_TRIGGER		0x000	/* ACRY Engine Control: trigger */
#define ASPEED_ACRY_DMA_CMD		0x048	/* ACRY Engine Control: Command */
#define ASPEED_ACRY_DMA_SRC_BASE	0x04C	/* ACRY DRAM base address for DMA */
#define ASPEED_ACRY_DMA_LEN		0x050	/* ACRY Data Length of DMA */
#define ASPEED_ACRY_RSA_KEY_LEN		0x058	/* ACRY RSA Exp/Mod Key Length (Bits) */
#define ASPEED_ACRY_INT_MASK		0x3F8	/* ACRY Interrupt Mask */
#define ASPEED_ACRY_STATUS		0x3FC	/* ACRY Interrupt Status */

/* rsa trigger */
#define ACRY_CMD_RSA_TRIGGER		BIT(0)
#define ACRY_CMD_DMA_RSA_TRIGGER	BIT(1)

/* rsa dma cmd */
#define ACRY_CMD_DMA_SRAM_MODE_RSA	(0x3 << 4)
#define ACRY_CMD_DMEM_AHB		BIT(8)
#define ACRY_CMD_DMA_SRAM_AHB_ENGINE	0

/* rsa key len */
#define RSA_E_BITS_LEN(x)		((x) << 16)
#define RSA_M_BITS_LEN(x)		(x)
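
/*
 * Illustrative sketch (not from the datasheet): for a 2048-bit modulus
 * and the common 17-bit public exponent 65537, the key-length register
 * value would be composed as
 *
 *	RSA_E_BITS_LEN(17) | RSA_M_BITS_LEN(2048)
 *
 * i.e. (17 << 16) | 2048. The trigger path below writes the equivalent
 * (ne << 16) + nm into ASPEED_ACRY_RSA_KEY_LEN.
 */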

/* acry isr */
#define ACRY_RSA_ISR			BIT(1)

#define ASPEED_ACRY_BUFF_SIZE		0x1800	/* DMA buffer size */
#define ASPEED_ACRY_SRAM_MAX_LEN	2048	/* ACRY SRAM maximum length (Bytes) */
#define ASPEED_ACRY_RSA_MAX_KEY_LEN	512	/* ACRY RSA maximum key length (Bytes) */

#define CRYPTO_FLAGS_BUSY		BIT(1)
#define BYTES_PER_DWORD			4

/*****************************
 *                           *
 * AHBC register definitions *
 *                           *
 *****************************/
#define AHBC_REGION_PROT		0x240
#define REGION_ACRYM			BIT(23)

#define ast_acry_write(acry, val, offset)	\
	writel((val), (acry)->regs + (offset))

#define ast_acry_read(acry, offset)		\
	readl((acry)->regs + (offset))

struct aspeed_acry_dev;

typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);

struct aspeed_acry_dev {
	void __iomem			*regs;
	struct device			*dev;
	int				irq;
	struct clk			*clk;
	struct regmap			*ahbc;

	struct akcipher_request		*req;
	struct tasklet_struct		done_task;
	aspeed_acry_fn_t		resume;
	unsigned long			flags;

	/* ACRY output SRAM buffer */
	void __iomem			*acry_sram;

	/* ACRY input DMA buffer */
	void				*buf_addr;
	dma_addr_t			buf_dma_addr;

	struct crypto_engine		*crypt_engine_rsa;

	/* ACRY SRAM memory mapped */
	int				exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
};

struct aspeed_acry_ctx {
	struct aspeed_acry_dev		*acry_dev;

	struct rsa_key			key;
	int				enc;
	u8				*n;
	u8				*e;
	u8				*d;
	size_t				n_sz;
	size_t				e_sz;
	size_t				d_sz;

	aspeed_acry_fn_t		trigger;

	struct crypto_akcipher		*fallback_tfm;
};

struct aspeed_acry_alg {
	struct aspeed_acry_dev		*acry_dev;
	struct akcipher_engine_alg	akcipher;
};

enum aspeed_rsa_key_mode {
	ASPEED_RSA_EXP_MODE = 0,
	ASPEED_RSA_MOD_MODE,
	ASPEED_RSA_DATA_MODE,
};

static inline struct akcipher_request *
akcipher_request_cast(struct crypto_async_request *req)
{
	return container_of(req, struct akcipher_request, base);
}

static int aspeed_acry_do_fallback(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	int err;

	akcipher_request_set_tfm(req, ctx->fallback_tfm);

	if (ctx->enc)
		err = crypto_akcipher_encrypt(req);
	else
		err = crypto_akcipher_decrypt(req);

	akcipher_request_set_tfm(req, cipher);

	return err;
}

static bool aspeed_acry_need_fallback(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);

	return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
}

static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
				    struct akcipher_request *req)
{
	if (aspeed_acry_need_fallback(req)) {
		ACRY_DBG(acry_dev, "SW fallback\n");
		return aspeed_acry_do_fallback(req);
	}

	return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
}

static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = akcipher_request_cast(areq);
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	acry_dev->req = req;
	acry_dev->flags |= CRYPTO_FLAGS_BUSY;

	return ctx->trigger(acry_dev);
}

static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
{
	struct akcipher_request *req = acry_dev->req;

	acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;

	crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);

	return err;
}

/*
 * Copy data to the DMA buffer for the engine to use.
 */
static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
					      u8 *buf, struct scatterlist *src,
					      size_t nbytes)
{
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int i = 0, j;
	int data_idx;

	ACRY_DBG(acry_dev, "\n");

	scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);

	for (j = nbytes - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = dram_buffer[j];
		i++;
	}

	for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = 0;
	}
}
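
/*
 * Worked example (hypothetical 4-byte input 0x12 0x34 0x56 0x78): the
 * copy loop above stores the message least-significant byte first, so
 * buf[data_byte_mapping[0]] = 0x78, buf[data_byte_mapping[1]] = 0x56,
 * buf[data_byte_mapping[2]] = 0x34, buf[data_byte_mapping[3]] = 0x12,
 * and every remaining mapped data byte is zero-padded.
 */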

/*
 * Copy the exponent/modulus to the DMA buffer for the engine to use.
 *
 * Params:
 * - mode 0 : Exponent
 * - mode 1 : Modulus
 *
 * Example:
 * - DRAM memory layout:
 *   D[0], D[4], D[8], D[12]
 * - ACRY SRAM memory layout should reverse the order of source data:
 *   D[12], D[8], D[4], D[0]
 */
static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
				    const void *xbuf, size_t nbytes,
				    enum aspeed_rsa_key_mode mode)
{
	const u8 *src = xbuf;
	__le32 *dw_buf = buf;
	int nbits, ndw;
	int i, j, idx;
	u32 data = 0;

	ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);

	if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return -ENOMEM;

	/* Remove the leading zeros */
	while (nbytes > 0 && src[0] == 0) {
		src++;
		nbytes--;
	}

	nbits = nbytes * 8;
	if (nbytes > 0)
		nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);

	/* double-word alignment */
	ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);

	if (nbytes > 0) {
		i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
		i %= BYTES_PER_DWORD;

		for (j = ndw; j > 0; j--) {
			for (; i < BYTES_PER_DWORD; i++) {
				data <<= 8;
				data |= *src++;
			}

			i = 0;

			if (mode == ASPEED_RSA_EXP_MODE)
				idx = acry_dev->exp_dw_mapping[j - 1];
			else /* mode == ASPEED_RSA_MOD_MODE */
				idx = acry_dev->mod_dw_mapping[j - 1];

			dw_buf[idx] = cpu_to_le32(data);
		}
	}

	return nbits;
}
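
/*
 * Worked example for the bit-length math above (assuming 64-bit
 * BITS_PER_LONG): e = 65537 is the byte string 0x01 0x00 0x01, so after
 * stripping leading zeros nbytes = 3 and
 *
 *	nbits = 24 - (count_leading_zeros(0x01) - 56) = 24 - 7 = 17,
 *
 * which the trigger path shifts into the exponent half of
 * ASPEED_ACRY_RSA_KEY_LEN.
 */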

static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	u8 __iomem *sram_buffer = acry_dev->acry_sram;
	struct scatterlist *out_sg = req->dst;
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int leading_zero = 1;
	int result_nbytes;
	int i = 0, j;
	int data_idx;

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Disable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, 0);

	result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;

	for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[j];
		if (readb(sram_buffer + data_idx) == 0 && leading_zero) {
			result_nbytes--;
		} else {
			leading_zero = 0;
			dram_buffer[i] = readb(sram_buffer + data_idx);
			i++;
		}
	}

	ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
		 result_nbytes, req->dst_len);

	if (result_nbytes <= req->dst_len) {
		scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
					 1);
		req->dst_len = result_nbytes;

	} else {
		dev_err(acry_dev->dev, "RSA engine error!\n");
	}

	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	return aspeed_acry_complete(acry_dev, 0);
}

static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	int ne, nm;

	if (!ctx->n || !ctx->n_sz) {
		dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
		return -EINVAL;
	}

	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	/* Copy source data to DMA buffer */
	aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
					  req->src, req->src_len);

	nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
				      ctx->n_sz, ASPEED_RSA_MOD_MODE);
	if (ctx->enc) {
		if (!ctx->e || !ctx->e_sz) {
			dev_err(acry_dev->dev, "%s: key e is not set\n",
				__func__);
			return -EINVAL;
		}
		/* Copy key e to DMA buffer */
		ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
					      ctx->e, ctx->e_sz,
					      ASPEED_RSA_EXP_MODE);
	} else {
		if (!ctx->d || !ctx->d_sz) {
			dev_err(acry_dev->dev, "%s: key d is not set\n",
				__func__);
			return -EINVAL;
		}
		/* Copy key d to DMA buffer */
		ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
					      ctx->key.d, ctx->key.d_sz,
					      ASPEED_RSA_EXP_MODE);
	}

	ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
		       ASPEED_ACRY_DMA_SRC_BASE);
	ast_acry_write(acry_dev, (ne << 16) + nm,
		       ASPEED_ACRY_RSA_KEY_LEN);
	ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
		       ASPEED_ACRY_DMA_LEN);

	acry_dev->resume = aspeed_acry_rsa_transfer;

	/* Enable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, REGION_ACRYM);

	ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
	ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
		       ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);

	/* Trigger RSA engines */
	ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
		       ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);

	return 0;
}

static int aspeed_acry_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	ctx->trigger = aspeed_acry_rsa_trigger;
	ctx->enc = 1;

	return aspeed_acry_handle_queue(acry_dev, req);
}

static int aspeed_acry_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	ctx->trigger = aspeed_acry_rsa_trigger;
	ctx->enc = 0;

	return aspeed_acry_handle_queue(acry_dev, req);
}

static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}

static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->n_sz = len;
	ctx->n = aspeed_rsa_key_copy(value, len);
	if (!ctx->n)
		return -ENOMEM;

	return 0;
}

static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->e_sz = len;
	ctx->e = aspeed_rsa_key_copy(value, len);
	if (!ctx->e)
		return -ENOMEM;

	return 0;
}

static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->d_sz = len;
	ctx->d = aspeed_rsa_key_copy(value, len);
	if (!ctx->d)
		return -ENOMEM;

	return 0;
}

static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
{
	kfree_sensitive(ctx->n);
	kfree_sensitive(ctx->e);
	kfree_sensitive(ctx->d);
	ctx->n_sz = 0;
	ctx->e_sz = 0;
	ctx->d_sz = 0;
}

static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
				  unsigned int keylen, int priv)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
	int ret;

	if (priv)
		ret = rsa_parse_priv_key(&ctx->key, key, keylen);
	else
		ret = rsa_parse_pub_key(&ctx->key, key, keylen);

	if (ret) {
		dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
			ret);
		return ret;
	}

	/* The Aspeed engine supports key lengths of up to 4096 bits;
	 * larger keys use the software fallback instead.
	 */
	if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return 0;

	ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
	if (ret)
		goto err;

	ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
	if (ret)
		goto err;

	if (priv) {
		ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
		if (ret)
			goto err;
	}

	return 0;

err:
	dev_err(acry_dev->dev, "rsa set key failed\n");
	aspeed_rsa_key_free(ctx);

	return ret;
}

static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
				       const void *key,
				       unsigned int keylen)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
	if (ret)
		return ret;

	return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
}

static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
					const void *key,
					unsigned int keylen)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
	if (ret)
		return ret;

	return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
}

static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

	if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return crypto_akcipher_maxsize(ctx->fallback_tfm);

	return ctx->n_sz;
}

static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct aspeed_acry_alg *acry_alg;

	acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);

	ctx->acry_dev = acry_alg->acry_dev;

	ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	return 0;
}

static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->fallback_tfm);
}

static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
	{
		.akcipher.base = {
			.encrypt = aspeed_acry_rsa_enc,
			.decrypt = aspeed_acry_rsa_dec,
			.sign = aspeed_acry_rsa_dec,
			.verify = aspeed_acry_rsa_enc,
			.set_pub_key = aspeed_acry_rsa_set_pub_key,
			.set_priv_key = aspeed_acry_rsa_set_priv_key,
			.max_size = aspeed_acry_rsa_max_size,
			.init = aspeed_acry_rsa_init_tfm,
			.exit = aspeed_acry_rsa_exit_tfm,
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "aspeed-rsa",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
			},
		},
		.akcipher.op = {
			.do_one_request = aspeed_acry_do_request,
		},
	},
};
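
/*
 * Consumer sketch (hedged): a kernel user that requests the generic name,
 * e.g.
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *
 * receives the highest-priority "rsa" implementation; with cra_priority
 * 300 this driver is expected to win over the generic software RSA once
 * registered, and CRYPTO_ALG_NEED_FALLBACK lets oversized keys be handed
 * back to that software implementation.
 */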

static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
		aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
		rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
		if (rc) {
			ACRY_DBG(acry_dev, "Failed to register %s\n",
				 aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
		}
	}
}

static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
		crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
}

/* ACRY interrupt service routine. */
static irqreturn_t aspeed_acry_irq(int irq, void *dev)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
	u32 sts;

	sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
	ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);

	ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);

	if (sts & ACRY_RSA_ISR) {
		/* Stop RSA engine */
		ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);

		if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
			tasklet_schedule(&acry_dev->done_task);
		else
			dev_err(acry_dev->dev, "RSA interrupt with no active request.\n");
	}

	return IRQ_HANDLED;
}

/*
 * ACRY SRAM has its own memory layout.
 * Set up the DRAM-to-SRAM index mapping for later use.
 */
static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
{
	int i, j = 0;

	for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
		acry_dev->exp_dw_mapping[i] = j;
		acry_dev->mod_dw_mapping[i] = j + 4;
		acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
		acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
		acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
		acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
		j++;
		j = j % 4 ? j : j + 8;
	}
}
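
/*
 * Resulting layout, derived by tracing the loop above: the SRAM repeats
 * in 12-dword groups of
 *
 *	[exp dw x4][mod dw x4][data dw x4]
 *
 * e.g. exp_dw_mapping[0..3] = 0..3, mod_dw_mapping[0..3] = 4..7, the
 * first 16 data bytes land at byte offsets 32..47, and the next group
 * starts at dword offset 12.
 */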

static void aspeed_acry_done_task(unsigned long data)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;

	(void)acry_dev->resume(acry_dev);
}

static const struct of_device_id aspeed_acry_of_matches[] = {
	{ .compatible = "aspeed,ast2600-acry", },
	{},
};

static int aspeed_acry_probe(struct platform_device *pdev)
{
	struct aspeed_acry_dev *acry_dev;
	struct device *dev = &pdev->dev;
	int rc;

	acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
				GFP_KERNEL);
	if (!acry_dev)
		return -ENOMEM;

	acry_dev->dev = dev;

	platform_set_drvdata(pdev, acry_dev);

	acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(acry_dev->regs))
		return PTR_ERR(acry_dev->regs);

	acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(acry_dev->acry_sram))
		return PTR_ERR(acry_dev->acry_sram);

	/* Get irq number and register it */
	acry_dev->irq = platform_get_irq(pdev, 0);
	if (acry_dev->irq < 0)
		return -ENXIO;

	rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
			      dev_name(dev), acry_dev);
	if (rc) {
		dev_err(dev, "Failed to request irq.\n");
		return rc;
	}

	acry_dev->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(acry_dev->clk)) {
		dev_err(dev, "Failed to get acry clk\n");
		return PTR_ERR(acry_dev->clk);
	}

	acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
							 "aspeed,ahbc");
	if (IS_ERR(acry_dev->ahbc)) {
		dev_err(dev, "Failed to get AHBC regmap\n");
		return -ENODEV;
	}

	/* Initialize crypto hardware engine structure for RSA */
	acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
	if (!acry_dev->crypt_engine_rsa)
		return -ENOMEM;

	rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
	if (rc)
		goto err_engine_rsa_start;

	tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
		     (unsigned long)acry_dev);

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Initialize ACRY SRAM index */
	aspeed_acry_sram_mapping(acry_dev);

	acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
						 &acry_dev->buf_dma_addr,
						 GFP_KERNEL);
	if (!acry_dev->buf_addr) {
		rc = -ENOMEM;
		goto err_engine_rsa_start;
	}

	aspeed_acry_register(acry_dev);

	dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");

	return 0;

	/*
	 * The clock is devm-managed (devm_clk_get_enabled()), so it must
	 * not be disabled again explicitly on the error path.
	 */
err_engine_rsa_start:
	crypto_engine_exit(acry_dev->crypt_engine_rsa);

	return rc;
}

static void aspeed_acry_remove(struct platform_device *pdev)
{
	struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);

	aspeed_acry_unregister(acry_dev);
	crypto_engine_exit(acry_dev->crypt_engine_rsa);
	tasklet_kill(&acry_dev->done_task);
	/* The devm-managed clock is disabled automatically on detach. */
}

MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);

static struct platform_driver aspeed_acry_driver = {
	.probe = aspeed_acry_probe,
	.remove_new = aspeed_acry_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = aspeed_acry_of_matches,
	},
};

module_platform_driver(aspeed_acry_driver);

MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
MODULE_LICENSE("GPL");
823 | |