// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16
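
/*
 * The block size is fixed at 16 bytes: the tweak arithmetic below works
 * in GF(2^128), so only ciphers with a 128-bit block size can be
 * wrapped (enforced in lrw_create()).
 */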

struct lrw_tfm_ctx {
	struct crypto_skcipher *child;

	/*
	 * optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2; we could also have
	 * used 4k optimization tables or no optimization at all. In the
	 * latter case we would have to store key2 here.
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc.
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

struct lrw_request_ctx {
	be128 t;
	struct skcipher_request subreq;
};

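/*
 * Set bit @bit of the 128-bit value at @b, where bit 0 is the least
 * significant coefficient of the big-endian ("bbe") integer. As a
 * sanity check of the index fixup below: on little-endian machines,
 * bit 0 maps to __set_bit(0 ^ 0x78) == 120, i.e. bit 0 of byte 15 (the
 * last byte of the be128), while bit 127 maps to __set_bit(7), the top
 * bit of byte 0.
 */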
static inline void lrw_setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

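/*
 * An LRW key is the underlying cipher key (key1) followed by the
 * 16-byte tweak key (key2). The trailing LRW_BLOCK_SIZE bytes are
 * consumed here to build the gf128mul tables; the rest is handed
 * through to the child cipher unchanged.
 */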
static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		lrw_setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

/*
 * Returns the number of trailing '1' bits in the words of the counter,
 * which is represented by 4 32-bit words, arranged from least to most
 * significant. At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = lrw_next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int lrw_next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, then res == 128 and we are incrementing the
	 * counter from all ones to all zeros. This means we must return
	 * index 127, i.e. the one corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}
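
/*
 * Why the return value of lrw_next_index() indexes mulinc[] (a sketch
 * of the reasoning): if the old counter has k trailing '1' bits, then
 * incrementing it flips exactly bits 0..k, so old ^ new has bits 0..k
 * set -- precisely the pattern whose product with key2 is cached in
 * mulinc[k]. Since multiplication by key2 is linear over GF(2),
 * key2*new == key2*old ^ mulinc[k], which is the single XOR done in
 * lrw_xor_tweak() below.
 */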

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the lrw_next_index() calls again.
 */
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t,
				  &ctx->mulinc[lrw_next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, false);
}

static int lrw_xor_tweak_post(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, true);
}

static void lrw_crypt_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err) {
		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);

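		/*
		 * This callback may run in atomic (softirq) context, so
		 * the second tweak pass below must not sleep.
		 */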
		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = lrw_xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}

static void lrw_init_crypt(struct skcipher_request *req)
{
	const struct lrw_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
				      req);
	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);
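	/*
	 * src == dst == req->dst on purpose: the first tweak pass copies
	 * the tweak-masked data from req->src into req->dst, so the ECB
	 * step then runs in place on req->dst.
	 */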

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}

static int lrw_encrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_decrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}
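
/*
 * Example usage (a minimal sketch): a caller combining AES-256 with LRW
 * would pass a 48-byte key -- the 32-byte AES key1 followed by the
 * 16-byte tweak key2:
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *	u8 key[32 + LRW_BLOCK_SIZE];	// key1 || key2
 *
 *	if (!IS_ERR(tfm))
 *		crypto_skcipher_setkey(tfm, key, sizeof(key));
 */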

static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct lrw_request_ctx));

	return 0;
}

static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void lrw_free_instance(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_alg_common *alg;
	struct skcipher_instance *inst;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
					   ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg_common(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_free_inst;

	if (alg->ivsize)
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		int len;

		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;
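
	/*
	 * For example: if the inner algorithm was resolved via the
	 * "ecb(...)" fallback above, alg->base.cra_name might be
	 * "ecb(aes)"; crypto_inst_setname() then produced
	 * "lrw(ecb(aes))", and the mangling strips the inner "ecb(" so
	 * the instance registers under the expected "lrw(aes)".
	 */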

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE;
	inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);

	inst->alg.init = lrw_init_tfm;
	inst->alg.exit = lrw_exit_tfm;

	inst->alg.setkey = lrw_setkey;
	inst->alg.encrypt = lrw_encrypt;
	inst->alg.decrypt = lrw_decrypt;

	inst->free = lrw_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		lrw_free_instance(inst);
	}
	return err;
}

static struct crypto_template lrw_tmpl = {
	.name = "lrw",
	.create = lrw_create,
	.module = THIS_MODULE,
};

static int __init lrw_module_init(void)
{
	return crypto_register_template(&lrw_tmpl);
}

static void __exit lrw_module_exit(void)
{
	crypto_unregister_template(&lrw_tmpl);
}

subsys_initcall(lrw_module_init);
module_exit(lrw_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
MODULE_SOFTDEP("pre: ecb");