/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct gendisk *disk);

void blk_crypto_sysfs_unregister(struct gendisk *disk);

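/*
 * Increment the multi-limb data unit number @dun by @inc, where dun[0] is the
 * least significant limb and carries propagate into the higher limbs.
 */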
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

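/*
 * Return true if the crypt contexts of @rq and @bio are compatible for
 * merging: either both are unencrypted, or both use the same key.  DUN
 * contiguity is checked separately by the *_mergeable() helpers below.
 */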
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

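/*
 * Return true if I/O using crypt context @bc2 can directly follow I/O that
 * uses crypt context @bc1 and spans @bc1_bytes: the contexts must be
 * compatible, and @bc2's DUN must continue @bc1's DUN after @bc1_bytes.
 */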
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

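/* Return true if @bio can be merged onto the back of @req */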
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

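/* Return true if @bio can be merged onto the front of @req */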
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

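/* Return true if request @next can be merged onto the back of request @req */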
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return rq->crypt_keyslot;
}

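/*
 * Find or reserve a keyslot in @profile that is programmed with @key, waiting
 * for one to become free if necessary, and return it via @slot_ptr.
 */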
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

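/* Evict @key from @profile so that no keyslot is programmed with it anymore */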
int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key);

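/* Return true if @profile supports the crypto settings in @cfg */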
bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
{
}

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

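/*
 * After a front merge, the merged request starts where @bio starts, so the
 * request's starting DUN must be replaced with the bio's DUN.
 */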
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

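/*
 * Prepare a bio that has a crypt context for submission.  Return true to
 * continue submitting the bio; false if the bio was handled here (on error,
 * bio->bi_status has been set and bio_endio() has been called).  *bio_ptr may
 * be replaced, e.g. with a bounce bio in the fallback case.
 */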
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

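/* Acquire the keyslot for @rq, if it is an encrypted request */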
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_rq_get_keyslot(rq);
	return BLK_STS_OK;
}

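/* Release the keyslot that @rq holds, if it holds one */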
void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
	if (blk_crypto_rq_has_keyslot(rq))
		__blk_crypto_rq_put_keyslot(rq);
}

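/* Free @rq's crypt context, if it has one */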
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

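/*
 * Ensure that the crypto API fallback has the transforms it needs for
 * @mode_num allocated, so that bios using that mode can fall back to it.
 */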
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

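/*
 * Handle *bio_ptr using the crypto API fallback instead of inline encryption
 * hardware.  Same return convention as blk_crypto_bio_prep(); for writes,
 * *bio_ptr is replaced with a bounce bio containing the encrypted data.
 */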
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

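/* Evict @key from the fallback's keyslot manager */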
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */