// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.security_strength = 32,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.security_strength = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.security_strength = 32,
		.ivsize = 32,
	},
	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
		.name = "SM4-XTS",
		.cipher_str = "xts(sm4)",
		.keysize = 32,
		.security_strength = 16,
		.ivsize = 16,
	},
};
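
/*
 * Note on the table above: .keysize is the raw key length consumed by the
 * mode, while .security_strength is the effective strength in bytes. The XTS
 * modes take two cipher keys, which is why AES-256-XTS has a 64-byte keysize
 * but a 32-byte security strength, and SM4-XTS (a 128-bit cipher) has a
 * 32-byte keysize with a 16-byte strength.
 */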

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/*
	 * Validate the crypto mode properties. This ideally would be done with
	 * static assertions, but boot-time checks are the next best thing.
	 */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize >
		       BLK_CRYPTO_MAX_RAW_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].security_strength >
		       blk_crypto_modes[i].keysize);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
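
/*
 * Illustrative use of bio_crypt_set_ctx() (the key, DUN value, and variable
 * names below are hypothetical, not part of this file):
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { logical_block_num };
 *
 *	bio_crypt_set_ctx(bio, &my_key, dun, GFP_NOIO);
 *	submit_bio(bio);
 *
 * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the mempool allocation above
 * cannot fail.
 */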

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
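
/*
 * Example of bio_crypt_dun_increment(): with 64-bit limbs, incrementing the
 * DUN { 0xffffffffffffffff, 0, ... } by 1 overflows limb 0 and carries into
 * limb 1, giving { 0, 1, ... }.
 */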

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
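
/*
 * Example of bio_crypt_dun_is_contiguous(): with a 4096-byte data unit size,
 * bc->bc_dun == { 5, 0, ... } and @bytes == 12288 (three data units), the
 * only contiguous @next_dun is { 8, 0, ... }.
 */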

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
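
/*
 * E.g. with a 4096-byte data unit size, every segment's offset and length
 * must be multiples of 4096; a 512-byte segment would fail this check, and
 * the bio would then be rejected in __blk_crypto_bio_prep().
 */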

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}

void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_crypto_config_supported_natively(bio->bi_bdev,
						 &bc_key->crypto_cfg))
		return true;
	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @key_bytes: the bytes of the key
 * @key_size: size of the key in bytes
 * @key_type: type of the key -- either raw or hardware-wrapped
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and key_bytes when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *key_bytes, size_t key_size,
			enum blk_crypto_key_type key_type,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	switch (key_type) {
	case BLK_CRYPTO_KEY_TYPE_RAW:
		if (key_size != mode->keysize)
			return -EINVAL;
		break;
	case BLK_CRYPTO_KEY_TYPE_HW_WRAPPED:
		if (key_size < mode->security_strength ||
		    key_size > BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.key_type = key_type;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = key_size;
	memcpy(blk_key->bytes, key_bytes, key_size);

	return 0;
}
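
/*
 * Illustrative call of blk_crypto_init_key(), assuming a caller that holds a
 * 64-byte raw AES-256-XTS key in 'raw' and encrypts 4096-byte data units
 * addressed by 8-byte DUNs (the variable names are hypothetical):
 *
 *	err = blk_crypto_init_key(&key, raw, 64, BLK_CRYPTO_KEY_TYPE_RAW,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS, 8, 4096);
 */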

bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
					  cfg);
}

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    cfg->key_type == BLK_CRYPTO_KEY_TYPE_RAW)
		return true;
	return blk_crypto_config_supported_natively(bdev, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -EOPNOTSUPP if the key is wrapped but the hardware does
 *	   not support wrapped keys; -ENOPKG if the key is a raw key but the
 *	   hardware does not support raw keys and blk-crypto-fallback is either
 *	   disabled or the needed algorithm is disabled in the crypto API; or
 *	   another -errno code if something else went wrong.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.key_type != BLK_CRYPTO_KEY_TYPE_RAW) {
		pr_warn_ratelimited("%pg: no support for wrapped keys\n", bdev);
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
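
/*
 * From an upper layer's point of view the expected key lifecycle is roughly:
 * blk_crypto_init_key(), then blk_crypto_start_using_key() on each block
 * device the key will be used on, then bio_crypt_set_ctx() on each bio, and
 * finally blk_crypto_evict_key() on each of those devices before the key is
 * freed.
 */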

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
 * @bdev: a block_device on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given block_device, this function removes the given blk_crypto_key from
 * the keyslot management structures and evicts it from any underlying hardware
 * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key. It must be
 * called for every block_device the key may have been used on. The key must no
 * longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct block_device *bdev,
			  const struct blk_crypto_key *key)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int err;

	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		err = __blk_crypto_evict_key(q->crypto_profile, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug). Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away. There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);

static int blk_crypto_ioctl_import_key(struct blk_crypto_profile *profile,
				       void __user *argp)
{
	struct blk_crypto_import_key_arg arg;
	u8 raw_key[BLK_CRYPTO_MAX_RAW_KEY_SIZE];
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int ret;

	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;

	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
		return -EINVAL;

	if (arg.raw_key_size < 16 || arg.raw_key_size > sizeof(raw_key))
		return -EINVAL;

	if (copy_from_user(raw_key, u64_to_user_ptr(arg.raw_key_ptr),
			   arg.raw_key_size)) {
		ret = -EFAULT;
		goto out;
	}
	ret = blk_crypto_import_key(profile, raw_key, arg.raw_key_size, lt_key);
	if (ret < 0)
		goto out;
	if (ret > arg.lt_key_size) {
		ret = -EOVERFLOW;
		goto out;
	}
	arg.lt_key_size = ret;
	if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
			 arg.lt_key_size) ||
	    copy_to_user(argp, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto out;
	}
	ret = 0;

out:
	memzero_explicit(raw_key, sizeof(raw_key));
	memzero_explicit(lt_key, sizeof(lt_key));
	return ret;
}

static int blk_crypto_ioctl_generate_key(struct blk_crypto_profile *profile,
					 void __user *argp)
{
	struct blk_crypto_generate_key_arg arg;
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int ret;

	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;

	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
		return -EINVAL;

	ret = blk_crypto_generate_key(profile, lt_key);
	if (ret < 0)
		goto out;
	if (ret > arg.lt_key_size) {
		ret = -EOVERFLOW;
		goto out;
	}
	arg.lt_key_size = ret;
	if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
			 arg.lt_key_size) ||
	    copy_to_user(argp, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto out;
	}
	ret = 0;

out:
	memzero_explicit(lt_key, sizeof(lt_key));
	return ret;
}

static int blk_crypto_ioctl_prepare_key(struct blk_crypto_profile *profile,
					void __user *argp)
{
	struct blk_crypto_prepare_key_arg arg;
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int ret;

	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;

	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
		return -EINVAL;

	if (arg.lt_key_size > sizeof(lt_key))
		return -EINVAL;

	if (copy_from_user(lt_key, u64_to_user_ptr(arg.lt_key_ptr),
			   arg.lt_key_size)) {
		ret = -EFAULT;
		goto out;
	}
	ret = blk_crypto_prepare_key(profile, lt_key, arg.lt_key_size, eph_key);
	if (ret < 0)
		goto out;
	if (ret > arg.eph_key_size) {
		ret = -EOVERFLOW;
		goto out;
	}
	arg.eph_key_size = ret;
	if (copy_to_user(u64_to_user_ptr(arg.eph_key_ptr), eph_key,
			 arg.eph_key_size) ||
	    copy_to_user(argp, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto out;
	}
	ret = 0;

out:
	memzero_explicit(lt_key, sizeof(lt_key));
	memzero_explicit(eph_key, sizeof(eph_key));
	return ret;
}

int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
		     void __user *argp)
{
	struct blk_crypto_profile *profile =
		bdev_get_queue(bdev)->crypto_profile;

	if (!profile)
		return -EOPNOTSUPP;

	switch (cmd) {
	case BLKCRYPTOIMPORTKEY:
		return blk_crypto_ioctl_import_key(profile, argp);
	case BLKCRYPTOGENERATEKEY:
		return blk_crypto_ioctl_generate_key(profile, argp);
	case BLKCRYPTOPREPAREKEY:
		return blk_crypto_ioctl_prepare_key(profile, argp);
	default:
		return -ENOTTY;
	}
}
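
/*
 * Userspace reaches these commands through the normal block device ioctl
 * path. A minimal sketch of importing a raw key as a long-term wrapped key
 * (error handling omitted; the fd and buffer names are illustrative):
 *
 *	struct blk_crypto_import_key_arg arg = { 0 };
 *
 *	arg.raw_key_ptr = (uintptr_t)raw_key;
 *	arg.raw_key_size = raw_key_size;
 *	arg.lt_key_ptr = (uintptr_t)lt_key_buf;
 *	arg.lt_key_size = sizeof(lt_key_buf);
 *	if (ioctl(block_dev_fd, BLKCRYPTOIMPORTKEY, &arg) == 0)
 *		lt_key_len = arg.lt_key_size;
 */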