// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */

#include <linux/t10-pi.h>
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/crc64.h>
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

typedef __be16 (csum_fn) (__be16, void *, unsigned int);

static __be16 t10_pi_crc_fn(__be16 crc, void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif_update(be16_to_cpu(crc), data, len));
}

static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}
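
/*
 * Two guard tag algorithms are supported: t10_pi_crc_fn computes the
 * T10-DIF CRC16 (polynomial 0x8bb7) and t10_pi_ip_fn the RFC 1071
 * ones' complement checksum. Which one applies is determined by the
 * integrity profile registered for the request queue, not negotiated
 * here.
 */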

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
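
/*
 * Illustrative layout of the 8-byte tuple (struct t10_pi_tuple in
 * <linux/t10-pi.h>):
 *
 *	bytes 0-1: guard tag	(__be16)
 *	bytes 2-3: app tag	(__be16)
 *	bytes 4-7: ref tag	(__be32)
 */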
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	u8 offset = iter->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = fn(0, iter->data_buf, iter->interval);
		if (offset)
			pi->guard_tag = fn(pi->guard_tag, iter->prot_buf,
					   offset);
		pi->app_tag = 0;

		if (type == T10_PI_TYPE1_PROTECTION)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
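
/*
 * Worked example (illustrative): for a 4096-byte data buffer, a
 * 512-byte protection interval and an 8-byte tuple, the loop above
 * emits eight tuples, and the Type 1 ref tag increments once per
 * interval: seed, seed + 1, ..., seed + 7.
 */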

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	u8 offset = iter->pi_offset;
	unsigned int i;

	BUG_ON(type == T10_PI_TYPE0_PROTECTION);

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;
		__be16 csum;

		if (type == T10_PI_TYPE1_PROTECTION ||
		    type == T10_PI_TYPE2_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else if (type == T10_PI_TYPE3_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = fn(0, iter->data_buf, iter->interval);
		if (offset)
			csum = fn(csum, iter->prot_buf, offset);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name, (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
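
/*
 * Note on the escape values checked above: an app tag of 0xffff
 * (T10_PI_APP_ESCAPE) disables verification of that interval. For
 * Type 3 the all-ones ref tag (T10_PI_REF_ESCAPE) must match as well,
 * since Type 3 does not define a ref tag of its own.
 */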

static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq: request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->integrity;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
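
/*
 * Remap example (illustrative): a write to virtual sector 0 of a
 * partition starting at physical LBA 2048 reaches the device at sector
 * 2048, so every tuple whose ref tag still holds the virtual value
 * (0, 1, 2, ...) is rewritten to the physical value (2048, 2049,
 * 2050, ...) before the request is issued.
 */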

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
 * @rq: request with PI that should be prepared
 * @nr_bytes: total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->integrity;
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}
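
/*
 * The intervals counter derived from nr_bytes bounds the remap above:
 * on a partial completion only the tuples covering the bytes that
 * actually completed are converted back to virtual ref tags.
 */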

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

const struct blk_integrity_profile t10_pi_type1_crc = {
	.name = "T10-DIF-TYPE1-CRC",
	.generate_fn = t10_pi_type1_generate_crc,
	.verify_fn = t10_pi_type1_verify_crc,
	.prepare_fn = t10_pi_type1_prepare,
	.complete_fn = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
	.name = "T10-DIF-TYPE1-IP",
	.generate_fn = t10_pi_type1_generate_ip,
	.verify_fn = t10_pi_type1_verify_ip,
	.prepare_fn = t10_pi_type1_prepare,
	.complete_fn = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
	.name = "T10-DIF-TYPE3-CRC",
	.generate_fn = t10_pi_type3_generate_crc,
	.verify_fn = t10_pi_type3_verify_crc,
	.prepare_fn = t10_pi_type3_prepare,
	.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name = "T10-DIF-TYPE3-IP",
	.generate_fn = t10_pi_type3_generate_ip,
	.verify_fn = t10_pi_type3_verify_ip,
	.prepare_fn = t10_pi_type3_prepare,
	.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
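
/*
 * Registration sketch (illustrative, not part of this file): a driver
 * exposing 512-byte intervals with Type 1 CRC protection would fill in
 * a struct blk_integrity and register it, roughly:
 *
 *	struct blk_integrity bi = {
 *		.profile	= &t10_pi_type1_crc,
 *		.tuple_size	= sizeof(struct t10_pi_tuple),
 *		.interval_exp	= 9,	// log2(512)
 *	};
 *	blk_integrity_register(disk, &bi);
 */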

static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
	return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
}
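
/*
 * The 64-bit guard uses the Rocksoft CRC64 parameters adopted by NVMe
 * for its extended protection formats. The matching 16-byte tuple
 * (struct crc64_pi_tuple) carries a 64-bit guard, a 16-bit app tag and
 * a 48-bit reference tag.
 */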

static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
		enum t10_dif_type type)
{
	u8 offset = iter->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
					iter->prot_buf, offset);
		pi->app_tag = 0;

		if (type == T10_PI_TYPE1_PROTECTION)
			put_unaligned_be48(iter->seed, pi->ref_tag);
		else
			put_unaligned_be48(0ULL, pi->ref_tag);

		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

static bool ext_pi_ref_escape(u8 *ref_tag)
{
	static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
}
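
/*
 * There is no __be48 type for the 48-bit ref tag, so the all-ones
 * escape pattern is matched byte-wise above, and the tag itself is
 * read and written with the unaligned 48-bit helpers elsewhere in
 * this file.
 */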

static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
		enum t10_dif_type type)
{
	u8 offset = iter->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
		u64 ref, seed;
		__be64 csum;

		if (type == T10_PI_TYPE1_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			ref = get_unaligned_be48(pi->ref_tag);
			seed = lower_48_bits(iter->seed);
			if (ref != seed) {
				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
					iter->disk_name, seed, ref);
				return BLK_STS_PROTECTION;
			}
		} else if (type == T10_PI_TYPE3_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    ext_pi_ref_escape(pi->ref_tag))
				goto next;
		}

		csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
					    offset);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %016llx, want %016llx)\n",
			       iter->disk_name, (unsigned long long)iter->seed,
			       be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION);
}

static void ext_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->integrity;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == virt)
					put_unaligned_be48(ref_tag, pi->ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->integrity;
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == ref_tag)
					put_unaligned_be48(virt, pi->ref_tag);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION);
}

const struct blk_integrity_profile ext_pi_type1_crc64 = {
	.name = "EXT-DIF-TYPE1-CRC64",
	.generate_fn = ext_pi_type1_generate_crc64,
	.verify_fn = ext_pi_type1_verify_crc64,
	.prepare_fn = ext_pi_type1_prepare,
	.complete_fn = ext_pi_type1_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type1_crc64);

const struct blk_integrity_profile ext_pi_type3_crc64 = {
	.name = "EXT-DIF-TYPE3-CRC64",
	.generate_fn = ext_pi_type3_generate_crc64,
	.verify_fn = ext_pi_type3_verify_crc64,
	.prepare_fn = t10_pi_type3_prepare,
	.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
MODULE_LICENSE("GPL");