// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static inline struct crypto_istat_cipher *skcipher_get_stat(
	struct skcipher_alg *alg)
{
	return skcipher_get_stat_common(&alg->co);
}

static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return err;
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

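/*
 * skcipher_walk_done() is called after each step of the walk.  @err is the
 * number of bytes the algorithm left unprocessed in this step (or a negative
 * errno).  It unmaps or flushes the current chunk, advances both scatterlist
 * walkers and either sets up the next chunk via skcipher_walk_next() or
 * finalizes the walk (copying back the IV and freeing any bounce buffers).
 */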
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

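/*
 * Flush the writes queued up by a SKCIPHER_WALK_PHYS (async) walk: copy each
 * queued bounce buffer out to its destination scatterlist position, free the
 * buffers, copy the IV back and release the walk's own buffer and page.
 * On error the queued writes are discarded instead of flushed.
 */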
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

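/*
 * Slow path: the chunk is smaller than the walk stride, typically because it
 * straddles a scatterlist entry or page boundary.  Allocate an aligned bounce
 * buffer that does not cross a page, copy the source data into it and point
 * both src and dst at it; the result is written back out either in
 * skcipher_done_slow() or, for phys walks, via the queued-buffer list.
 */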
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

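/*
 * Copy path: the data is contiguous but misaligned.  Bounce the source chunk
 * through walk->page so the algorithm sees aligned buffers; virt walks copy
 * the result back in skcipher_walk_done(), phys walks queue the write-back
 * for skcipher_walk_complete().
 */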
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

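/*
 * Fast path: both scatterlists are aligned, so the pages can be mapped and
 * operated on in place.  SKCIPHER_WALK_DIFF marks the case where src and dst
 * are distinct mappings and both need unmapping in skcipher_walk_done().
 */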
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

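/*
 * Set up the next chunk of the walk: clamp the remaining total against both
 * scatterlists and pick the slow (bounce buffer), copy (aligned bounce page)
 * or fast (in-place mapping) path for it.
 */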
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

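/*
 * Bounce the IV into a buffer that satisfies the algorithm's alignmask and
 * does not straddle a page.  Called when the IV supplied with the request is
 * insufficiently aligned; the aligned copy is written back on completion.
 */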
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

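/*
 * Initialise the walk from an skcipher request: record the total, IV and
 * per-transform parameters and start walking both scatterlists.  The stride
 * is the algorithm's walksize, except for lskcipher-backed transforms where
 * the chunksize is used instead.
 */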
static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

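/*
 * A minimal sketch of how an algorithm typically drives a virtual-address
 * walk (the names "bsize", "ctx" and "crypt_blocks" are illustrative only):
 *
 *	struct skcipher_walk walk;
 *	unsigned int nbytes;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while ((nbytes = walk.nbytes) != 0) {
 *		unsigned int n = nbytes - (nbytes % bsize);
 *
 *		crypt_blocks(ctx, walk.dst.virt.addr, walk.src.virt.addr, n);
 *		err = skcipher_walk_done(&walk, nbytes - n);
 *	}
 *	return err;
 */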
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

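/*
 * AEAD variant of the walk setup: identical to the skcipher version except
 * that the associated data at the front of both scatterlists is skipped
 * before the walk starts, and the transform parameters come from the AEAD.
 */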
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

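/*
 * Set the key on an skcipher transform.  lskcipher-backed transforms are
 * forwarded to crypto_lskcipher_setkey(); otherwise the key length is checked
 * against the algorithm's limits and misaligned keys are bounced through an
 * aligned buffer.  CRYPTO_TFM_NEED_KEY is cleared on success and reasserted
 * on failure so a half-keyed transform cannot be used.
 */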
int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

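/*
 * Top-level encrypt/decrypt entry points: bump the optional statistics,
 * reject transforms that still need a key and dispatch either to the
 * algorithm's own handler or, for lskcipher-backed transforms, to the
 * scatterlist wrappers around the linear lskcipher operations.
 */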
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else if (alg->co.base.cra_type != &crypto_skcipher_type)
		ret = crypto_lskcipher_encrypt_sg(req);
	else
		ret = alg->encrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else if (alg->co.base.cra_type != &crypto_skcipher_type)
		ret = crypto_lskcipher_decrypt_sg(req);
	else
		ret = alg->decrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type)
		return crypto_init_lskcipher_ops_sg(tfm);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_skcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = skcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_skcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
	const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

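/*
 * Validate and finalise an algorithm before registration.  ivsize, chunksize
 * and (below) walksize are capped at PAGE_SIZE / 8 as a sanity limit, and an
 * unset chunksize or walksize defaults to the block size or chunksize
 * respectively.
 */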
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(istat, 0, sizeof(*istat));

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

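/*
 * Default callbacks shared by the "simple" block cipher mode templates
 * (see skcipher_alloc_instance_simple() below): the tfm context holds a
 * single crypto_cipher, keys are forwarded to it and its request flags are
 * kept in sync with the wrapping skcipher.
 */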
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);