// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/sha2.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op *op,
		  u32 may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

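	/* retry while the accelerator reports busy; if we are not allowed
	 * to sleep, bound the busy-wait to the 10 attempts above */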
	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}

/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a 4K
 * boundary.
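 *
 * As an illustrative sketch of the typical call pattern in this driver
 * (buf and nbytes are placeholders, not a verbatim caller):
 *
 *	unsigned int len = nbytes;
 *	struct nx_sg *end = nx_build_sg_list(nx_ctx->in_sg, buf, &len,
 *					     nx_ctx->ap->sglen);
 *	nx_ctx->op.inlen = (end - nx_ctx->in_sg) * sizeof(struct nx_sg);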
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8 *start_addr,
			       unsigned int *len,
			       u32 sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different if this is in VMALLOC_REGION */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + *len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr; if not, we need to create another nx_sg element for the
	 * data on the next page.
	 *
	 * Also, when using vmalloc'ed data, every time a system page boundary
	 * is crossed the physical address needs to be re-calculated.
	 */
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
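		/* advance to the next NX page boundary, or stop at end_addr
		 * if the buffer ends before that */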
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			sg++;
			break;
		}
	}
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}

/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
				unsigned int sglen,
				struct scatterlist *sg_src,
				unsigned int start,
				unsigned int *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = *src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

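	/* each pass maps at most one contiguous chunk of the current
	 * scatterlist element and hands it to nx_build_sg_list(), which
	 * splits it further at 4K boundaries */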
	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* In cases where we have a scatterlist chain,
			 * sg_next() handles it properly */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, &n,
					 sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}

/**
 * trim_sg_list - ensure an sg list stays within its bound
 * @sg: sg list head
 * @end: sg list end
 * @delta: the number of bytes we need to crop in order to bound the list
 * @nbytes: length of data in the scatterlists or data length - whichever
 *          is greater.
 */
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

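	/* trim from the tail: shrink the last element, or drop it entirely
	 * when delta spans the whole element */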
	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list so that it is a
	 * block size multiple, but we also need to keep the data aligned.
	 * To do that we need to calculate how much data must be put back
	 * to be processed later.
	 */
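	/* (sg - end) is non-positive; a negative length tells phyp that the
	 * parameter is a scatterlist rather than a linear buffer */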
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}

/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @iv: iv data, if the algorithm requires it
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @oiv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the crypto
 * scatterlist walk routines to traverse input and output scatterlists,
 * building corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      const u8 *iv,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int *nbytes,
		      unsigned int offset,
		      u8 *oiv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	if (oiv)
		memcpy(oiv, iv, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
				     offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
				    offset, nbytes);

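	/* if the request was truncated to fit the hardware limits, delta is
	 * the tail fragment past the last full AES block; trim_sg_list()
	 * pushes it back so it can be processed on a later pass */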
	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}

/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

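	/* the AEAD op shares the same in/out scatterlists as the base op,
	 * but points at its own csbcpb */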
	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}

static void nx_of_update_status(struct device *dev,
				struct property *p,
				struct nx_of *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}

static void nx_of_update_sglen(struct device *dev,
			       struct property *p,
			       struct nx_of *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device *dev,
			     struct property *p,
			     struct nx_of *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
	bytes_so_far = 0;

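	/* the property is a packed sequence of max_sync_cop headers, each
	 * followed by msc->triplets msc_triplet entries (one per key size) */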
	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;

		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
				dev_warn(dev, "bogus sglen/databytelen: "
					 "%u/%u (ignored)\n", trip->sglen,
					 trip->databytelen);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						 "code/key bit len combo"
						 ": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					 "len combo: (%u/%u)\n", msc->fc,
					 trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}

		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}

/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
		if (dev)
			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
				 "%u/%u (ignored)\n", fc, mode, slot,
				 props->sglen, props->databytelen);
		return false;
	}

	return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (!nx_check_prop(dev, fc, mode, i))
			return false;

	return true;
}

static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_skcipher(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_aead(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
					  fc, mode, slot) :
			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
	       crypto_register_shash(alg) : 0;
}

static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_skcipher(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
				int slot)
{
	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
			nx_check_props(NULL, fc, mode))
		crypto_unregister_shash(alg);
}

/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
	int rc = -1;

	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	NX_DEBUGFS_INIT(&nx_driver);

	nx_driver.of.status = NX_OKAY;

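	/* register in a fixed order; on failure, the goto ladder below
	 * unregisters everything registered so far, in reverse order */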
	rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	if (rc)
		goto out;

	rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
	if (rc)
		goto out_unreg_ecb;

	rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
				  NX_MODE_AES_CTR);
	if (rc)
		goto out_unreg_cbc;

	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_ctr3686;

	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_gcm;

	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_gcm4106;

	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_ccm;

	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA256);
	if (rc)
		goto out_unreg_ccm4309;

	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA512);
	if (rc)
		goto out_unreg_s256;

	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA512);
out_unreg_s256:
	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA256);
out_unreg_ccm4309:
	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
	nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
	nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
	nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
	return rc;
}

/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

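	/* after page alignment, kmem holds one page for the csbcpb, one page
	 * each for the in/out scatterlists, and (for GCM/CCM) one more page
	 * for csbcpb_aead */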
	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}

/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}

/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
{
	nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm));
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

	kfree_sensitive(nx_ctx->kmem);
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}

static void nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
		nx_unregister_shash(&nx_shash_sha512_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
		nx_unregister_shash(&nx_shash_sha256_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
		nx_unregister_aead(&nx_ccm4309_aes_alg,
				   NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_gcm4106_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_aead(&nx_gcm_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_skcipher(&nx_ctr3686_aes_alg,
				       NX_FC_AES, NX_MODE_AES_CTR);
		nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
				       NX_MODE_AES_CBC);
		nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
				       NX_MODE_AES_ECB);
	}
}

/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static const struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);