1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2011, 2012 STRATO. All rights reserved. |
4 | */ |
5 | |
6 | #include <linux/blkdev.h> |
7 | #include <linux/ratelimit.h> |
8 | #include <linux/sched/mm.h> |
9 | #include <crypto/hash.h> |
10 | #include "ctree.h" |
11 | #include "discard.h" |
12 | #include "volumes.h" |
13 | #include "disk-io.h" |
14 | #include "ordered-data.h" |
15 | #include "transaction.h" |
16 | #include "backref.h" |
17 | #include "extent_io.h" |
18 | #include "dev-replace.h" |
19 | #include "raid56.h" |
20 | #include "block-group.h" |
21 | #include "zoned.h" |
22 | #include "fs.h" |
23 | #include "accessors.h" |
24 | #include "file-item.h" |
25 | #include "scrub.h" |
26 | #include "raid-stripe-tree.h" |
27 | |
28 | /* |
* This is only the first step towards a full-featured scrub. It reads all
30 | * extent and super block and verifies the checksums. In case a bad checksum |
31 | * is found or the extent cannot be read, good data will be written back if |
32 | * any can be found. |
33 | * |
34 | * Future enhancements: |
35 | * - In case an unrepairable extent is encountered, track which files are |
36 | * affected and report them |
37 | * - track and record media errors, throw out bad devices |
38 | * - add a mode to also read unallocated space |
39 | */ |
40 | |
41 | struct scrub_ctx; |
42 | |
43 | /* |
44 | * The following value only influences the performance. |
45 | * |
46 | * This determines how many stripes would be submitted in one go, |
47 | * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP). |
48 | */ |
49 | #define SCRUB_STRIPES_PER_GROUP 8 |
50 | |
51 | /* |
52 | * How many groups we have for each sctx. |
53 | * |
54 | * This would be 8M per device, the same value as the old scrub in-flight bios |
55 | * size limit. |
56 | */ |
57 | #define SCRUB_GROUPS_PER_SCTX 16 |
58 | |
59 | #define SCRUB_TOTAL_STRIPES (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP) |
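
/*
* A quick sanity check of the number quoted above (assuming the usual 64KiB
* BTRFS_STRIPE_LEN): 16 groups * 8 stripes * 64KiB = 8MiB kept in flight per
* device, matching the old in-flight bio size limit.
*/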
60 | |
61 | /* |
62 | * The following value times PAGE_SIZE needs to be large enough to match the |
63 | * largest node/leaf/sector size that shall be supported. |
64 | */ |
65 | #define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K) |
66 | |
67 | /* Represent one sector and its needed info to verify the content. */ |
68 | struct scrub_sector_verification { |
69 | bool is_metadata; |
70 | |
71 | union { |
72 | /* |
73 | * Csum pointer for data csum verification. Should point to a |
74 | * sector csum inside scrub_stripe::csums. |
75 | * |
76 | * NULL if this data sector has no csum. |
77 | */ |
78 | u8 *csum; |
79 | |
80 | /* |
81 | * Extra info for metadata verification. All sectors inside a |
82 | * tree block share the same generation. |
83 | */ |
84 | u64 generation; |
85 | }; |
86 | }; |
87 | |
88 | enum scrub_stripe_flags { |
89 | /* Set when @mirror_num, @dev, @physical and @logical are set. */ |
90 | SCRUB_STRIPE_FLAG_INITIALIZED, |
91 | |
92 | /* Set when the read-repair is finished. */ |
93 | SCRUB_STRIPE_FLAG_REPAIR_DONE, |
94 | |
95 | /* |
96 | * Set for data stripes if it's triggered from P/Q stripe. |
97 | * During such scrub, we should not report errors in data stripes, nor |
98 | * update the accounting. |
99 | */ |
100 | SCRUB_STRIPE_FLAG_NO_REPORT, |
101 | }; |
102 | |
103 | #define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE) |
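
/*
* Back-of-the-envelope note (assuming 4KiB pages): 64KiB / 4KiB = 16 pages
* backing each scrub_stripe; larger page sizes need proportionally fewer.
*/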
104 | |
105 | /* |
106 | * Represent one contiguous range with a length of BTRFS_STRIPE_LEN. |
107 | */ |
108 | struct scrub_stripe { |
109 | struct scrub_ctx *sctx; |
110 | struct btrfs_block_group *bg; |
111 | |
112 | struct page *pages[SCRUB_STRIPE_PAGES]; |
113 | struct scrub_sector_verification *sectors; |
114 | |
115 | struct btrfs_device *dev; |
116 | u64 logical; |
117 | u64 physical; |
118 | |
119 | u16 mirror_num; |
120 | |
121 | /* Should be BTRFS_STRIPE_LEN / sectorsize. */ |
122 | u16 nr_sectors; |
123 | |
124 | /* |
125 | * How many data/meta extents are in this stripe. Only for scrub status |
126 | * reporting purposes. |
127 | */ |
128 | u16 nr_data_extents; |
129 | u16 nr_meta_extents; |
130 | |
131 | atomic_t pending_io; |
132 | wait_queue_head_t io_wait; |
133 | wait_queue_head_t repair_wait; |
134 | |
135 | /* |
136 | * Indicate the states of the stripe. Bits are defined in |
137 | * scrub_stripe_flags enum. |
138 | */ |
139 | unsigned long state; |
140 | |
141 | /* Indicate which sectors are covered by extent items. */ |
142 | unsigned long extent_sector_bitmap; |
143 | |
144 | /* |
145 | * The errors hit during the initial read of the stripe. |
146 | * |
147 | * Would be utilized for error reporting and repair. |
148 | * |
149 | * The remaining init_nr_* records the number of errors hit, only used |
150 | * by error reporting. |
151 | */ |
152 | unsigned long init_error_bitmap; |
153 | unsigned int init_nr_io_errors; |
154 | unsigned int init_nr_csum_errors; |
155 | unsigned int init_nr_meta_errors; |
156 | |
157 | /* |
158 | * The following error bitmaps are all for the current status. |
159 | * Every time we submit a new read, these bitmaps may be updated. |
160 | * |
161 | * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap; |
162 | * |
163 | * IO and csum errors can happen for both metadata and data. |
164 | */ |
165 | unsigned long error_bitmap; |
166 | unsigned long io_error_bitmap; |
167 | unsigned long csum_error_bitmap; |
168 | unsigned long meta_error_bitmap; |
169 | |
170 | /* For writeback (repair or replace) error reporting. */ |
171 | unsigned long write_error_bitmap; |
172 | |
173 | /* Writeback can be concurrent, thus we need to protect the bitmap. */ |
174 | spinlock_t write_error_lock; |
175 | |
176 | /* |
177 | * Checksum for the whole stripe if this stripe is inside a data block |
178 | * group. |
179 | */ |
180 | u8 *csums; |
181 | |
182 | struct work_struct work; |
183 | }; |
184 | |
185 | struct scrub_ctx { |
186 | struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES]; |
187 | struct scrub_stripe *raid56_data_stripes; |
188 | struct btrfs_fs_info *fs_info; |
189 | struct btrfs_path extent_path; |
190 | struct btrfs_path csum_path; |
191 | int first_free; |
192 | int cur_stripe; |
193 | atomic_t cancel_req; |
194 | int readonly; |
195 | |
196 | /* State of IO submission throttling affecting the associated device */ |
197 | ktime_t throttle_deadline; |
198 | u64 throttle_sent; |
199 | |
200 | int is_dev_replace; |
201 | u64 write_pointer; |
202 | |
203 | struct mutex wr_lock; |
204 | struct btrfs_device *wr_tgtdev; |
205 | |
206 | /* |
207 | * statistics |
208 | */ |
209 | struct btrfs_scrub_progress stat; |
210 | spinlock_t stat_lock; |
211 | |
212 | /* |
213 | * Use a ref counter to avoid use-after-free issues. Scrub workers |
214 | * decrement bios_in_flight and workers_pending and then do a wakeup |
215 | * on the list_wait wait queue. We must ensure the main scrub task |
216 | * doesn't free the scrub context before or while the workers are |
217 | * doing the wakeup() call. |
218 | */ |
219 | refcount_t refs; |
220 | }; |
221 | |
222 | struct scrub_warning { |
223 | struct btrfs_path *path; |
224 | u64 extent_item_size; |
225 | const char *errstr; |
226 | u64 physical; |
227 | u64 logical; |
228 | struct btrfs_device *dev; |
229 | }; |
230 | |
231 | static void release_scrub_stripe(struct scrub_stripe *stripe) |
232 | { |
233 | if (!stripe) |
234 | return; |
235 | |
236 | for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) { |
237 | if (stripe->pages[i]) |
238 | __free_page(stripe->pages[i]); |
239 | stripe->pages[i] = NULL; |
240 | } |
kfree(stripe->sectors);
kfree(stripe->csums);
243 | stripe->sectors = NULL; |
244 | stripe->csums = NULL; |
245 | stripe->sctx = NULL; |
246 | stripe->state = 0; |
247 | } |
248 | |
249 | static int init_scrub_stripe(struct btrfs_fs_info *fs_info, |
250 | struct scrub_stripe *stripe) |
251 | { |
252 | int ret; |
253 | |
254 | memset(stripe, 0, sizeof(*stripe)); |
255 | |
256 | stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; |
257 | stripe->state = 0; |
258 | |
259 | init_waitqueue_head(&stripe->io_wait); |
260 | init_waitqueue_head(&stripe->repair_wait); |
atomic_set(&stripe->pending_io, 0);
262 | spin_lock_init(&stripe->write_error_lock); |
263 | |
ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, 0);
265 | if (ret < 0) |
266 | goto error; |
267 | |
stripe->sectors = kcalloc(stripe->nr_sectors,
sizeof(struct scrub_sector_verification),
270 | GFP_KERNEL); |
271 | if (!stripe->sectors) |
272 | goto error; |
273 | |
274 | stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits, |
fs_info->csum_size, GFP_KERNEL);
276 | if (!stripe->csums) |
277 | goto error; |
278 | return 0; |
279 | error: |
280 | release_scrub_stripe(stripe); |
281 | return -ENOMEM; |
282 | } |
283 | |
284 | static void wait_scrub_stripe_io(struct scrub_stripe *stripe) |
285 | { |
286 | wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0); |
287 | } |
288 | |
289 | static void scrub_put_ctx(struct scrub_ctx *sctx); |
290 | |
291 | static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) |
292 | { |
while (atomic_read(&fs_info->scrub_pause_req)) {
mutex_unlock(&fs_info->scrub_lock);
295 | wait_event(fs_info->scrub_pause_wait, |
296 | atomic_read(&fs_info->scrub_pause_req) == 0); |
297 | mutex_lock(&fs_info->scrub_lock); |
298 | } |
299 | } |
300 | |
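/*
* Pause handshake with the rest of the filesystem, a short sketch of the
* protocol as used below: scrub_pause_on() advertises this scrub as paused and
* wakes any waiter on scrub_pause_wait; scrub_pause_off() blocks while
* scrub_pause_req is raised, then drops the paused count and wakes the waiters
* again.
*/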
301 | static void scrub_pause_on(struct btrfs_fs_info *fs_info) |
302 | { |
atomic_inc(&fs_info->scrubs_paused);
304 | wake_up(&fs_info->scrub_pause_wait); |
305 | } |
306 | |
307 | static void scrub_pause_off(struct btrfs_fs_info *fs_info) |
308 | { |
309 | mutex_lock(&fs_info->scrub_lock); |
310 | __scrub_blocked_if_needed(fs_info); |
atomic_dec(&fs_info->scrubs_paused);
mutex_unlock(&fs_info->scrub_lock);
313 | |
314 | wake_up(&fs_info->scrub_pause_wait); |
315 | } |
316 | |
317 | static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) |
318 | { |
319 | scrub_pause_on(fs_info); |
320 | scrub_pause_off(fs_info); |
321 | } |
322 | |
323 | static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) |
324 | { |
325 | int i; |
326 | |
327 | if (!sctx) |
328 | return; |
329 | |
330 | for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) |
release_scrub_stripe(&sctx->stripes[i]);
332 | |
kvfree(sctx);
334 | } |
335 | |
336 | static void scrub_put_ctx(struct scrub_ctx *sctx) |
337 | { |
if (refcount_dec_and_test(&sctx->refs))
339 | scrub_free_ctx(sctx); |
340 | } |
341 | |
342 | static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( |
343 | struct btrfs_fs_info *fs_info, int is_dev_replace) |
344 | { |
345 | struct scrub_ctx *sctx; |
346 | int i; |
347 | |
348 | /* Since sctx has inline 128 stripes, it can go beyond 64K easily. Use |
349 | * kvzalloc(). |
350 | */ |
sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
352 | if (!sctx) |
353 | goto nomem; |
refcount_set(&sctx->refs, 1);
355 | sctx->is_dev_replace = is_dev_replace; |
356 | sctx->fs_info = fs_info; |
357 | sctx->extent_path.search_commit_root = 1; |
358 | sctx->extent_path.skip_locking = 1; |
359 | sctx->csum_path.search_commit_root = 1; |
360 | sctx->csum_path.skip_locking = 1; |
361 | for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) { |
362 | int ret; |
363 | |
ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
365 | if (ret < 0) |
366 | goto nomem; |
367 | sctx->stripes[i].sctx = sctx; |
368 | } |
369 | sctx->first_free = 0; |
atomic_set(&sctx->cancel_req, 0);
371 | |
372 | spin_lock_init(&sctx->stat_lock); |
373 | sctx->throttle_deadline = 0; |
374 | |
375 | mutex_init(&sctx->wr_lock); |
376 | if (is_dev_replace) { |
377 | WARN_ON(!fs_info->dev_replace.tgtdev); |
378 | sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; |
379 | } |
380 | |
381 | return sctx; |
382 | |
383 | nomem: |
384 | scrub_free_ctx(sctx); |
return ERR_PTR(-ENOMEM);
386 | } |
387 | |
388 | static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, |
389 | u64 root, void *warn_ctx) |
390 | { |
391 | u32 nlink; |
392 | int ret; |
393 | int i; |
394 | unsigned nofs_flag; |
395 | struct extent_buffer *eb; |
396 | struct btrfs_inode_item *inode_item; |
397 | struct scrub_warning *swarn = warn_ctx; |
398 | struct btrfs_fs_info *fs_info = swarn->dev->fs_info; |
399 | struct inode_fs_paths *ipath = NULL; |
400 | struct btrfs_root *local_root; |
401 | struct btrfs_key key; |
402 | |
local_root = btrfs_get_fs_root(fs_info, root, true);
if (IS_ERR(local_root)) {
ret = PTR_ERR(local_root);
406 | goto err; |
407 | } |
408 | |
409 | /* |
410 | * this makes the path point to (inum INODE_ITEM ioff) |
411 | */ |
412 | key.objectid = inum; |
413 | key.type = BTRFS_INODE_ITEM_KEY; |
414 | key.offset = 0; |
415 | |
ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
if (ret) {
btrfs_put_root(local_root);
btrfs_release_path(swarn->path);
420 | goto err; |
421 | } |
422 | |
423 | eb = swarn->path->nodes[0]; |
424 | inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], |
425 | struct btrfs_inode_item); |
nlink = btrfs_inode_nlink(eb, inode_item);
btrfs_release_path(swarn->path);
428 | |
429 | /* |
430 | * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub |
431 | * uses GFP_NOFS in this context, so we keep it consistent but it does |
432 | * not seem to be strictly necessary. |
433 | */ |
434 | nofs_flag = memalloc_nofs_save(); |
ipath = init_ipath(4096, local_root, swarn->path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(ipath)) {
btrfs_put_root(local_root);
ret = PTR_ERR(ipath);
440 | ipath = NULL; |
441 | goto err; |
442 | } |
443 | ret = paths_from_inode(inum, ipath); |
444 | |
445 | if (ret < 0) |
446 | goto err; |
447 | |
448 | /* |
* We deliberately ignore the fact that ipath might have been too small to
* hold all of the paths here.
451 | */ |
452 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) |
453 | btrfs_warn_in_rcu(fs_info, |
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
455 | swarn->errstr, swarn->logical, |
456 | btrfs_dev_name(swarn->dev), |
457 | swarn->physical, |
458 | root, inum, offset, |
459 | fs_info->sectorsize, nlink, |
460 | (char *)(unsigned long)ipath->fspath->val[i]); |
461 | |
btrfs_put_root(local_root);
463 | free_ipath(ipath); |
464 | return 0; |
465 | |
466 | err: |
467 | btrfs_warn_in_rcu(fs_info, |
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
469 | swarn->errstr, swarn->logical, |
470 | btrfs_dev_name(swarn->dev), |
471 | swarn->physical, |
472 | root, inum, offset, ret); |
473 | |
474 | free_ipath(ipath); |
475 | return 0; |
476 | } |
477 | |
478 | static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev, |
479 | bool is_super, u64 logical, u64 physical) |
480 | { |
481 | struct btrfs_fs_info *fs_info = dev->fs_info; |
482 | struct btrfs_path *path; |
483 | struct btrfs_key found_key; |
484 | struct extent_buffer *eb; |
485 | struct btrfs_extent_item *ei; |
486 | struct scrub_warning swarn; |
487 | u64 flags = 0; |
488 | u32 item_size; |
489 | int ret; |
490 | |
491 | /* Super block error, no need to search extent tree. */ |
492 | if (is_super) { |
btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
494 | errstr, btrfs_dev_name(dev), physical); |
495 | return; |
496 | } |
497 | path = btrfs_alloc_path(); |
498 | if (!path) |
499 | return; |
500 | |
501 | swarn.physical = physical; |
502 | swarn.logical = logical; |
503 | swarn.errstr = errstr; |
504 | swarn.dev = NULL; |
505 | |
ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
&flags);
508 | if (ret < 0) |
509 | goto out; |
510 | |
511 | swarn.extent_item_size = found_key.offset; |
512 | |
513 | eb = path->nodes[0]; |
514 | ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); |
item_size = btrfs_item_size(eb, path->slots[0]);
516 | |
517 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { |
518 | unsigned long ptr = 0; |
519 | u8 ref_level; |
520 | u64 ref_root; |
521 | |
522 | while (true) { |
ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
item_size, &ref_root,
&ref_level);
526 | if (ret < 0) { |
527 | btrfs_warn(fs_info, |
"failed to resolve tree backref for logical %llu: %d",
529 | swarn.logical, ret); |
530 | break; |
531 | } |
532 | if (ret > 0) |
533 | break; |
534 | btrfs_warn_in_rcu(fs_info, |
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
errstr, swarn.logical, btrfs_dev_name(dev),
swarn.physical, (ref_level ? "node" : "leaf"),
ref_level, ref_root);
539 | } |
btrfs_release_path(path);
541 | } else { |
542 | struct btrfs_backref_walk_ctx ctx = { 0 }; |
543 | |
btrfs_release_path(path);
545 | |
546 | ctx.bytenr = found_key.objectid; |
547 | ctx.extent_item_pos = swarn.logical - found_key.objectid; |
548 | ctx.fs_info = fs_info; |
549 | |
550 | swarn.path = path; |
551 | swarn.dev = dev; |
552 | |
iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
554 | } |
555 | |
556 | out: |
btrfs_free_path(path);
558 | } |
559 | |
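/*
* On zoned dev-replace targets writes must be sequential. If the next write
* would land beyond the current write pointer, zero out the gap first so the
* zone's write pointer catches up (a no-op for non-zoned filesystems and
* conventional zones).
*/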
560 | static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) |
561 | { |
562 | int ret = 0; |
563 | u64 length; |
564 | |
if (!btrfs_is_zoned(sctx->fs_info))
566 | return 0; |
567 | |
if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
569 | return 0; |
570 | |
571 | if (sctx->write_pointer < physical) { |
572 | length = physical - sctx->write_pointer; |
573 | |
ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
sctx->write_pointer, length);
576 | if (!ret) |
577 | sctx->write_pointer = physical; |
578 | } |
579 | return ret; |
580 | } |
581 | |
582 | static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr) |
583 | { |
584 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
585 | int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; |
586 | |
587 | return stripe->pages[page_index]; |
588 | } |
589 | |
590 | static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe, |
591 | int sector_nr) |
592 | { |
593 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
594 | |
595 | return offset_in_page(sector_nr << fs_info->sectorsize_bits); |
596 | } |
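
/*
* A worked example for the two helpers above, assuming a 4KiB sectorsize:
* with 4KiB pages, sector 5 lives in pages[5] at offset 0; with 64KiB pages
* the same sector lives in pages[0] at offset 20KiB.
*/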
597 | |
598 | static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr) |
599 | { |
600 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
601 | const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; |
602 | const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); |
603 | const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr); |
604 | const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr); |
605 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
606 | u8 on_disk_csum[BTRFS_CSUM_SIZE]; |
607 | u8 calculated_csum[BTRFS_CSUM_SIZE]; |
struct btrfs_header *header;
609 | |
610 | /* |
611 | * Here we don't have a good way to attach the pages (and subpages) |
612 | * to a dummy extent buffer, thus we have to directly grab the members |
613 | * from pages. |
614 | */ |
615 | header = (struct btrfs_header *)(page_address(first_page) + first_off); |
616 | memcpy(on_disk_csum, header->csum, fs_info->csum_size); |
617 | |
if (logical != btrfs_stack_header_bytenr(header)) {
bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
logical, stripe->mirror_num,
btrfs_stack_header_bytenr(header), logical);
return;
}
if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
BTRFS_FSID_SIZE) != 0) {
bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
"tree block %llu mirror %u has bad fsid, has %pU want %pU",
logical, stripe->mirror_num,
header->fsid, fs_info->fs_devices->fsid);
return;
}
if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
BTRFS_UUID_SIZE) != 0) {
bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
logical, stripe->mirror_num,
header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
return;
}
647 | |
648 | /* Now check tree block csum. */ |
649 | shash->tfm = fs_info->csum_shash; |
crypto_shash_init(shash);
crypto_shash_update(shash, page_address(first_page) + first_off +
BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);

for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
struct page *page = scrub_stripe_get_page(stripe, i);
unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);

crypto_shash_update(shash, page_address(page) + page_off,
fs_info->sectorsize);
660 | } |
661 | |
crypto_shash_final(shash, calculated_csum);
if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
logical, stripe->mirror_num,
CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
return;
}
if (stripe->sectors[sector_nr].generation !=
btrfs_stack_header_generation(header)) {
bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info,
"tree block %llu mirror %u has bad generation, has %llu want %llu",
logical, stripe->mirror_num,
btrfs_stack_header_generation(header),
stripe->sectors[sector_nr].generation);
return;
}
bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
687 | } |
688 | |
689 | static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) |
690 | { |
691 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
692 | struct scrub_sector_verification *sector = &stripe->sectors[sector_nr]; |
693 | const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; |
694 | struct page *page = scrub_stripe_get_page(stripe, sector_nr); |
695 | unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); |
696 | u8 csum_buf[BTRFS_CSUM_SIZE]; |
697 | int ret; |
698 | |
699 | ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); |
700 | |
701 | /* Sector not utilized, skip it. */ |
702 | if (!test_bit(sector_nr, &stripe->extent_sector_bitmap)) |
703 | return; |
704 | |
705 | /* IO error, no need to check. */ |
706 | if (test_bit(sector_nr, &stripe->io_error_bitmap)) |
707 | return; |
708 | |
709 | /* Metadata, verify the full tree block. */ |
710 | if (sector->is_metadata) { |
711 | /* |
712 | * Check if the tree block crosses the stripe boundary. If |
713 | * crossed the boundary, we cannot verify it but only give a |
714 | * warning. |
715 | * |
716 | * This can only happen on a very old filesystem where chunks |
717 | * are not ensured to be stripe aligned. |
718 | */ |
719 | if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) { |
720 | btrfs_warn_rl(fs_info, |
"tree block at %llu crosses stripe boundary %llu",
722 | stripe->logical + |
723 | (sector_nr << fs_info->sectorsize_bits), |
724 | stripe->logical); |
725 | return; |
726 | } |
727 | scrub_verify_one_metadata(stripe, sector_nr); |
728 | return; |
729 | } |
730 | |
731 | /* |
732 | * Data is easier, we just verify the data csum (if we have it). For |
733 | * cases without csum, we have no other choice but to trust it. |
734 | */ |
735 | if (!sector->csum) { |
clear_bit(sector_nr, &stripe->error_bitmap);
737 | return; |
738 | } |
739 | |
ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
if (ret < 0) {
set_bit(sector_nr, &stripe->csum_error_bitmap);
set_bit(sector_nr, &stripe->error_bitmap);
} else {
clear_bit(sector_nr, &stripe->csum_error_bitmap);
clear_bit(sector_nr, &stripe->error_bitmap);
747 | } |
748 | } |
749 | |
750 | /* Verify specified sectors of a stripe. */ |
751 | static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap) |
752 | { |
753 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
754 | const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; |
755 | int sector_nr; |
756 | |
757 | for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { |
758 | scrub_verify_one_sector(stripe, sector_nr); |
759 | if (stripe->sectors[sector_nr].is_metadata) |
760 | sector_nr += sectors_per_tree - 1; |
761 | } |
762 | } |
763 | |
764 | static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec) |
765 | { |
766 | int i; |
767 | |
768 | for (i = 0; i < stripe->nr_sectors; i++) { |
if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
771 | break; |
772 | } |
773 | ASSERT(i < stripe->nr_sectors); |
774 | return i; |
775 | } |
776 | |
777 | /* |
778 | * Repair read is different to the regular read: |
779 | * |
780 | * - Only reads the failed sectors |
781 | * - May have extra blocksize limits |
782 | */ |
783 | static void scrub_repair_read_endio(struct btrfs_bio *bbio) |
784 | { |
785 | struct scrub_stripe *stripe = bbio->private; |
786 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
787 | struct bio_vec *bvec; |
int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
789 | u32 bio_size = 0; |
790 | int i; |
791 | |
792 | ASSERT(sector_nr < stripe->nr_sectors); |
793 | |
794 | bio_for_each_bvec_all(bvec, &bbio->bio, i) |
795 | bio_size += bvec->bv_len; |
796 | |
797 | if (bbio->bio.bi_status) { |
bitmap_set(&stripe->io_error_bitmap, sector_nr,
bio_size >> fs_info->sectorsize_bits);
bitmap_set(&stripe->error_bitmap, sector_nr,
bio_size >> fs_info->sectorsize_bits);
} else {
bitmap_clear(&stripe->io_error_bitmap, sector_nr,
bio_size >> fs_info->sectorsize_bits);
805 | } |
806 | bio_put(&bbio->bio); |
if (atomic_dec_and_test(&stripe->pending_io))
808 | wake_up(&stripe->io_wait); |
809 | } |
810 | |
811 | static int calc_next_mirror(int mirror, int num_copies) |
812 | { |
813 | ASSERT(mirror <= num_copies); |
814 | return (mirror + 1 > num_copies) ? 1 : mirror + 1; |
815 | } |
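
/*
* Example of the wrap-around above: with num_copies == 3 the repair code
* visits mirrors in the order 1 -> 2 -> 3 -> 1 -> ...
*/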
816 | |
817 | static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe, |
818 | int mirror, int blocksize, bool wait) |
819 | { |
820 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
821 | struct btrfs_bio *bbio = NULL; |
822 | const unsigned long old_error_bitmap = stripe->error_bitmap; |
823 | int i; |
824 | |
825 | ASSERT(stripe->mirror_num >= 1); |
826 | ASSERT(atomic_read(&stripe->pending_io) == 0); |
827 | |
828 | for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { |
829 | struct page *page; |
830 | int pgoff; |
831 | int ret; |
832 | |
page = scrub_stripe_get_page(stripe, i);
pgoff = scrub_stripe_get_page_offset(stripe, i);
835 | |
836 | /* The current sector cannot be merged, submit the bio. */ |
837 | if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) || |
838 | bbio->bio.bi_iter.bi_size >= blocksize)) { |
839 | ASSERT(bbio->bio.bi_iter.bi_size); |
atomic_inc(&stripe->pending_io);
btrfs_submit_bio(bbio, mirror);
842 | if (wait) |
843 | wait_scrub_stripe_io(stripe); |
844 | bbio = NULL; |
845 | } |
846 | |
847 | if (!bbio) { |
bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
fs_info, scrub_repair_read_endio, stripe);
850 | bbio->bio.bi_iter.bi_sector = (stripe->logical + |
851 | (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; |
852 | } |
853 | |
ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
855 | ASSERT(ret == fs_info->sectorsize); |
856 | } |
857 | if (bbio) { |
858 | ASSERT(bbio->bio.bi_iter.bi_size); |
atomic_inc(&stripe->pending_io);
btrfs_submit_bio(bbio, mirror);
861 | if (wait) |
862 | wait_scrub_stripe_io(stripe); |
863 | } |
864 | } |
865 | |
866 | static void scrub_stripe_report_errors(struct scrub_ctx *sctx, |
867 | struct scrub_stripe *stripe) |
868 | { |
869 | static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, |
870 | DEFAULT_RATELIMIT_BURST); |
871 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
872 | struct btrfs_device *dev = NULL; |
873 | u64 physical = 0; |
874 | int nr_data_sectors = 0; |
875 | int nr_meta_sectors = 0; |
876 | int nr_nodatacsum_sectors = 0; |
877 | int nr_repaired_sectors = 0; |
878 | int sector_nr; |
879 | |
880 | if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state)) |
881 | return; |
882 | |
883 | /* |
884 | * Init needed infos for error reporting. |
885 | * |
886 | * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio() |
887 | * thus no need for dev/physical, error reporting still needs dev and physical. |
888 | */ |
if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
890 | u64 mapped_len = fs_info->sectorsize; |
891 | struct btrfs_io_context *bioc = NULL; |
892 | int stripe_index = stripe->mirror_num - 1; |
893 | int ret; |
894 | |
895 | /* For scrub, our mirror_num should always start at 1. */ |
896 | ASSERT(stripe->mirror_num >= 1); |
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
stripe->logical, &mapped_len, &bioc,
899 | NULL, NULL); |
900 | /* |
901 | * If we failed, dev will be NULL, and later detailed reports |
902 | * will just be skipped. |
903 | */ |
904 | if (ret < 0) |
905 | goto skip; |
906 | physical = bioc->stripes[stripe_index].physical; |
907 | dev = bioc->stripes[stripe_index].dev; |
908 | btrfs_put_bioc(bioc); |
909 | } |
910 | |
911 | skip: |
912 | for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) { |
913 | bool repaired = false; |
914 | |
915 | if (stripe->sectors[sector_nr].is_metadata) { |
916 | nr_meta_sectors++; |
917 | } else { |
918 | nr_data_sectors++; |
919 | if (!stripe->sectors[sector_nr].csum) |
920 | nr_nodatacsum_sectors++; |
921 | } |
922 | |
923 | if (test_bit(sector_nr, &stripe->init_error_bitmap) && |
924 | !test_bit(sector_nr, &stripe->error_bitmap)) { |
925 | nr_repaired_sectors++; |
926 | repaired = true; |
927 | } |
928 | |
/* Good sector from the beginning, nothing needs to be done. */
if (!test_bit(sector_nr, &stripe->init_error_bitmap))
continue;

/*
* Report error for the corrupted sectors. If repaired, just
* output the message that it has been repaired.
*/
937 | if (repaired) { |
938 | if (dev) { |
939 | btrfs_err_rl_in_rcu(fs_info, |
"fixed up error at logical %llu on dev %s physical %llu",
941 | stripe->logical, btrfs_dev_name(dev), |
942 | physical); |
943 | } else { |
944 | btrfs_err_rl_in_rcu(fs_info, |
"fixed up error at logical %llu on mirror %u",
946 | stripe->logical, stripe->mirror_num); |
947 | } |
948 | continue; |
949 | } |
950 | |
951 | /* The remaining are all for unrepaired. */ |
952 | if (dev) { |
953 | btrfs_err_rl_in_rcu(fs_info, |
"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
955 | stripe->logical, btrfs_dev_name(dev), |
956 | physical); |
957 | } else { |
958 | btrfs_err_rl_in_rcu(fs_info, |
"unable to fixup (regular) error at logical %llu on mirror %u",
960 | stripe->logical, stripe->mirror_num); |
961 | } |
962 | |
963 | if (test_bit(sector_nr, &stripe->io_error_bitmap)) |
964 | if (__ratelimit(&rs) && dev) |
scrub_print_common_warning("i/o error", dev, false,
stripe->logical, physical);
967 | if (test_bit(sector_nr, &stripe->csum_error_bitmap)) |
968 | if (__ratelimit(&rs) && dev) |
scrub_print_common_warning("checksum error", dev, false,
stripe->logical, physical);
971 | if (test_bit(sector_nr, &stripe->meta_error_bitmap)) |
972 | if (__ratelimit(&rs) && dev) |
scrub_print_common_warning("header error", dev, false,
stripe->logical, physical);
975 | } |
976 | |
spin_lock(&sctx->stat_lock);
978 | sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; |
979 | sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; |
980 | sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits; |
981 | sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits; |
982 | sctx->stat.no_csum += nr_nodatacsum_sectors; |
983 | sctx->stat.read_errors += stripe->init_nr_io_errors; |
984 | sctx->stat.csum_errors += stripe->init_nr_csum_errors; |
985 | sctx->stat.verify_errors += stripe->init_nr_meta_errors; |
sctx->stat.uncorrectable_errors +=
bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
988 | sctx->stat.corrected_errors += nr_repaired_sectors; |
spin_unlock(&sctx->stat_lock);
990 | } |
991 | |
992 | static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe, |
993 | unsigned long write_bitmap, bool dev_replace); |
994 | |
995 | /* |
* The main entry point for all read-related scrub work, including:
997 | * |
998 | * - Wait for the initial read to finish |
999 | * - Verify and locate any bad sectors |
1000 | * - Go through the remaining mirrors and try to read as large blocksize as |
1001 | * possible |
1002 | * - Go through all mirrors (including the failed mirror) sector-by-sector |
1003 | * - Submit writeback for repaired sectors |
1004 | * |
1005 | * Writeback for dev-replace does not happen here, it needs extra |
1006 | * synchronization for zoned devices. |
1007 | */ |
1008 | static void scrub_stripe_read_repair_worker(struct work_struct *work) |
1009 | { |
1010 | struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work); |
1011 | struct scrub_ctx *sctx = stripe->sctx; |
1012 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length);
1015 | int mirror; |
1016 | int i; |
1017 | |
1018 | ASSERT(stripe->mirror_num > 0); |
1019 | |
1020 | wait_scrub_stripe_io(stripe); |
scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
1022 | /* Save the initial failed bitmap for later repair and report usage. */ |
1023 | stripe->init_error_bitmap = stripe->error_bitmap; |
stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
stripe->nr_sectors);
stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
stripe->nr_sectors);
stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
stripe->nr_sectors);
1030 | |
if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1032 | goto out; |
1033 | |
1034 | /* |
1035 | * Try all remaining mirrors. |
1036 | * |
1037 | * Here we still try to read as large block as possible, as this is |
1038 | * faster and we have extra safety nets to rely on. |
1039 | */ |
for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
mirror != stripe->mirror_num;
mirror = calc_next_mirror(mirror, num_copies)) {
const unsigned long old_error_bitmap = stripe->error_bitmap;

scrub_stripe_submit_repair_read(stripe, mirror,
BTRFS_STRIPE_LEN, false);
wait_scrub_stripe_io(stripe);
scrub_verify_one_stripe(stripe, old_error_bitmap);
if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1050 | goto out; |
1051 | } |
1052 | |
1053 | /* |
1054 | * Last safety net, try re-checking all mirrors, including the failed |
1055 | * one, sector-by-sector. |
1056 | * |
* If one sector fails the drive's internal csum, the whole read
* containing the offending sector would be marked as an error.
* Thus here we do sector-by-sector reads.
1060 | * |
1061 | * This can be slow, thus we only try it as the last resort. |
1062 | */ |
1063 | |
1064 | for (i = 0, mirror = stripe->mirror_num; |
1065 | i < num_copies; |
1066 | i++, mirror = calc_next_mirror(mirror, num_copies)) { |
1067 | const unsigned long old_error_bitmap = stripe->error_bitmap; |
1068 | |
scrub_stripe_submit_repair_read(stripe, mirror,
fs_info->sectorsize, true);
wait_scrub_stripe_io(stripe);
scrub_verify_one_stripe(stripe, old_error_bitmap);
if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1074 | goto out; |
1075 | } |
1076 | out: |
1077 | /* |
1078 | * Submit the repaired sectors. For zoned case, we cannot do repair |
1079 | * in-place, but queue the bg to be relocated. |
1080 | */ |
1081 | if (btrfs_is_zoned(fs_info)) { |
if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
} else if (!sctx->readonly) {
unsigned long repaired;

bitmap_andnot(&repaired, &stripe->init_error_bitmap,
&stripe->error_bitmap, stripe->nr_sectors);
scrub_write_sectors(sctx, stripe, repaired, false);
1090 | wait_scrub_stripe_io(stripe); |
1091 | } |
1092 | |
1093 | scrub_stripe_report_errors(sctx, stripe); |
set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1095 | wake_up(&stripe->repair_wait); |
1096 | } |
1097 | |
1098 | static void scrub_read_endio(struct btrfs_bio *bbio) |
1099 | { |
1100 | struct scrub_stripe *stripe = bbio->private; |
1101 | struct bio_vec *bvec; |
int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1103 | int num_sectors; |
1104 | u32 bio_size = 0; |
1105 | int i; |
1106 | |
1107 | ASSERT(sector_nr < stripe->nr_sectors); |
1108 | bio_for_each_bvec_all(bvec, &bbio->bio, i) |
1109 | bio_size += bvec->bv_len; |
1110 | num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits; |
1111 | |
1112 | if (bbio->bio.bi_status) { |
bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
} else {
bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
}
bio_put(&bbio->bio);
if (atomic_dec_and_test(&stripe->pending_io)) {
wake_up(&stripe->io_wait);
INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1123 | } |
1124 | } |
1125 | |
1126 | static void scrub_write_endio(struct btrfs_bio *bbio) |
1127 | { |
1128 | struct scrub_stripe *stripe = bbio->private; |
1129 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
1130 | struct bio_vec *bvec; |
int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1132 | u32 bio_size = 0; |
1133 | int i; |
1134 | |
1135 | bio_for_each_bvec_all(bvec, &bbio->bio, i) |
1136 | bio_size += bvec->bv_len; |
1137 | |
1138 | if (bbio->bio.bi_status) { |
1139 | unsigned long flags; |
1140 | |
1141 | spin_lock_irqsave(&stripe->write_error_lock, flags); |
bitmap_set(&stripe->write_error_bitmap, sector_nr,
bio_size >> fs_info->sectorsize_bits);
spin_unlock_irqrestore(&stripe->write_error_lock, flags);
}
bio_put(&bbio->bio);

if (atomic_dec_and_test(&stripe->pending_io))
1149 | wake_up(&stripe->io_wait); |
1150 | } |
1151 | |
1152 | static void scrub_submit_write_bio(struct scrub_ctx *sctx, |
1153 | struct scrub_stripe *stripe, |
1154 | struct btrfs_bio *bbio, bool dev_replace) |
1155 | { |
1156 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
1157 | u32 bio_len = bbio->bio.bi_iter.bi_size; |
1158 | u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - |
1159 | stripe->logical; |
1160 | |
fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
atomic_inc(&stripe->pending_io);
btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1164 | if (!btrfs_is_zoned(fs_info)) |
1165 | return; |
1166 | /* |
1167 | * For zoned writeback, queue depth must be 1, thus we must wait for |
1168 | * the write to finish before the next write. |
1169 | */ |
1170 | wait_scrub_stripe_io(stripe); |
1171 | |
1172 | /* |
1173 | * And also need to update the write pointer if write finished |
1174 | * successfully. |
1175 | */ |
1176 | if (!test_bit(bio_off >> fs_info->sectorsize_bits, |
1177 | &stripe->write_error_bitmap)) |
1178 | sctx->write_pointer += bio_len; |
1179 | } |
1180 | |
1181 | /* |
1182 | * Submit the write bio(s) for the sectors specified by @write_bitmap. |
1183 | * |
1184 | * Here we utilize btrfs_submit_repair_write(), which has some extra benefits: |
1185 | * |
1186 | * - Only needs logical bytenr and mirror_num |
1187 | * Just like the scrub read path |
1188 | * |
1189 | * - Would only result in writes to the specified mirror |
1190 | * Unlike the regular writeback path, which would write back to all stripes |
1191 | * |
1192 | * - Handle dev-replace and read-repair writeback differently |
1193 | */ |
1194 | static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe, |
1195 | unsigned long write_bitmap, bool dev_replace) |
1196 | { |
1197 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
1198 | struct btrfs_bio *bbio = NULL; |
1199 | int sector_nr; |
1200 | |
1201 | for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { |
1202 | struct page *page = scrub_stripe_get_page(stripe, sector_nr); |
1203 | unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); |
1204 | int ret; |
1205 | |
1206 | /* We should only writeback sectors covered by an extent. */ |
1207 | ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap)); |
1208 | |
1209 | /* Cannot merge with previous sector, submit the current one. */ |
1210 | if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { |
1211 | scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); |
1212 | bbio = NULL; |
1213 | } |
1214 | if (!bbio) { |
bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
fs_info, scrub_write_endio, stripe);
1217 | bbio->bio.bi_iter.bi_sector = (stripe->logical + |
1218 | (sector_nr << fs_info->sectorsize_bits)) >> |
1219 | SECTOR_SHIFT; |
1220 | } |
ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1222 | ASSERT(ret == fs_info->sectorsize); |
1223 | } |
1224 | if (bbio) |
1225 | scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); |
1226 | } |
1227 | |
1228 | /* |
1229 | * Throttling of IO submission, bandwidth-limit based, the timeslice is 1 |
1230 | * second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max. |
1231 | */ |
1232 | static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device, |
1233 | unsigned int bio_size) |
1234 | { |
1235 | const int time_slice = 1000; |
1236 | s64 delta; |
1237 | ktime_t now; |
1238 | u32 div; |
1239 | u64 bwlimit; |
1240 | |
1241 | bwlimit = READ_ONCE(device->scrub_speed_max); |
1242 | if (bwlimit == 0) |
1243 | return; |
1244 | |
1245 | /* |
1246 | * Slice is divided into intervals when the IO is submitted, adjust by |
1247 | * bwlimit and maximum of 64 intervals. |
1248 | */ |
1249 | div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024))); |
1250 | div = min_t(u32, 64, div); |
1251 | |
1252 | /* Start new epoch, set deadline */ |
1253 | now = ktime_get(); |
1254 | if (sctx->throttle_deadline == 0) { |
sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1256 | sctx->throttle_sent = 0; |
1257 | } |
1258 | |
1259 | /* Still in the time to send? */ |
if (ktime_before(now, sctx->throttle_deadline)) {
1261 | /* If current bio is within the limit, send it */ |
1262 | sctx->throttle_sent += bio_size; |
if (sctx->throttle_sent <= div_u64(bwlimit, div))
1264 | return; |
1265 | |
1266 | /* We're over the limit, sleep until the rest of the slice */ |
delta = ktime_ms_delta(sctx->throttle_deadline, now);
1268 | } else { |
1269 | /* New request after deadline, start new epoch */ |
1270 | delta = 0; |
1271 | } |
1272 | |
1273 | if (delta) { |
1274 | long timeout; |
1275 | |
timeout = div_u64(delta * HZ, 1000);
1277 | schedule_timeout_interruptible(timeout); |
1278 | } |
1279 | |
1280 | /* Next call will start the deadline period */ |
1281 | sctx->throttle_deadline = 0; |
1282 | } |
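
/*
* Rough worked example of the throttling math above, assuming scrub_speed_max
* is set to 100MiB/s: div = min(64, 100MiB / 16MiB) = 6, so each epoch lasts
* about 1000ms / 6 ~= 166ms and allows roughly bwlimit / div ~= 17MiB of
* submitted IO before the task sleeps until the deadline.
*/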
1283 | |
1284 | /* |
* Given a physical address, this will calculate its logical offset.
* If this is a parity stripe, it will return the leftmost data stripe's
* logical offset.
*
* Return 0 if it is a data stripe, 1 if it is a parity stripe.
1290 | */ |
1291 | static int get_raid56_logic_offset(u64 physical, int num, |
1292 | struct btrfs_chunk_map *map, u64 *offset, |
1293 | u64 *stripe_start) |
1294 | { |
1295 | int i; |
1296 | int j = 0; |
1297 | u64 last_offset; |
1298 | const int data_stripes = nr_data_stripes(map); |
1299 | |
1300 | last_offset = (physical - map->stripes[num].physical) * data_stripes; |
1301 | if (stripe_start) |
1302 | *stripe_start = last_offset; |
1303 | |
1304 | *offset = last_offset; |
1305 | for (i = 0; i < data_stripes; i++) { |
1306 | u32 stripe_nr; |
1307 | u32 stripe_index; |
1308 | u32 rot; |
1309 | |
*offset = last_offset + btrfs_stripe_nr_to_offset(i);
1311 | |
1312 | stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes; |
1313 | |
1314 | /* Work out the disk rotation on this stripe-set */ |
1315 | rot = stripe_nr % map->num_stripes; |
1316 | /* calculate which stripe this data locates */ |
1317 | rot += i; |
1318 | stripe_index = rot % map->num_stripes; |
1319 | if (stripe_index == num) |
1320 | return 0; |
1321 | if (stripe_index < num) |
1322 | j++; |
1323 | } |
*offset = last_offset + btrfs_stripe_nr_to_offset(j);
1325 | return 1; |
1326 | } |
1327 | |
1328 | /* |
1329 | * Return 0 if the extent item range covers any byte of the range. |
1330 | * Return <0 if the extent item is before @search_start. |
* Return >0 if the extent item is after @search_start + @search_len.
1332 | */ |
1333 | static int compare_extent_item_range(struct btrfs_path *path, |
1334 | u64 search_start, u64 search_len) |
1335 | { |
1336 | struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; |
1337 | u64 len; |
1338 | struct btrfs_key key; |
1339 | |
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1341 | ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY || |
1342 | key.type == BTRFS_METADATA_ITEM_KEY); |
1343 | if (key.type == BTRFS_METADATA_ITEM_KEY) |
1344 | len = fs_info->nodesize; |
1345 | else |
1346 | len = key.offset; |
1347 | |
1348 | if (key.objectid + len <= search_start) |
1349 | return -1; |
1350 | if (key.objectid >= search_start + search_len) |
1351 | return 1; |
1352 | return 0; |
1353 | } |
1354 | |
1355 | /* |
1356 | * Locate one extent item which covers any byte in range |
1357 | * [@search_start, @search_start + @search_length) |
1358 | * |
1359 | * If the path is not initialized, we will initialize the search by doing |
1360 | * a btrfs_search_slot(). |
1361 | * If the path is already initialized, we will use the path as the initial |
1362 | * slot, to avoid duplicated btrfs_search_slot() calls. |
1363 | * |
1364 | * NOTE: If an extent item starts before @search_start, we will still |
1365 | * return the extent item. This is for data extent crossing stripe boundary. |
1366 | * |
1367 | * Return 0 if we found such extent item, and @path will point to the extent item. |
1368 | * Return >0 if no such extent item can be found, and @path will be released. |
1369 | * Return <0 if hit fatal error, and @path will be released. |
1370 | */ |
1371 | static int find_first_extent_item(struct btrfs_root *extent_root, |
1372 | struct btrfs_path *path, |
1373 | u64 search_start, u64 search_len) |
1374 | { |
1375 | struct btrfs_fs_info *fs_info = extent_root->fs_info; |
1376 | struct btrfs_key key; |
1377 | int ret; |
1378 | |
1379 | /* Continue using the existing path */ |
1380 | if (path->nodes[0]) |
1381 | goto search_forward; |
1382 | |
1383 | if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) |
1384 | key.type = BTRFS_METADATA_ITEM_KEY; |
1385 | else |
1386 | key.type = BTRFS_EXTENT_ITEM_KEY; |
1387 | key.objectid = search_start; |
1388 | key.offset = (u64)-1; |
1389 | |
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1391 | if (ret < 0) |
1392 | return ret; |
1393 | if (ret == 0) { |
1394 | /* |
1395 | * Key with offset -1 found, there would have to exist an extent |
1396 | * item with such offset, but this is out of the valid range. |
1397 | */ |
btrfs_release_path(path);
1399 | return -EUCLEAN; |
1400 | } |
1401 | |
1402 | /* |
1403 | * Here we intentionally pass 0 as @min_objectid, as there could be |
1404 | * an extent item starting before @search_start. |
1405 | */ |
ret = btrfs_previous_extent_item(extent_root, path, 0);
1407 | if (ret < 0) |
1408 | return ret; |
1409 | /* |
1410 | * No matter whether we have found an extent item, the next loop will |
1411 | * properly do every check on the key. |
1412 | */ |
1413 | search_forward: |
1414 | while (true) { |
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1416 | if (key.objectid >= search_start + search_len) |
1417 | break; |
1418 | if (key.type != BTRFS_METADATA_ITEM_KEY && |
1419 | key.type != BTRFS_EXTENT_ITEM_KEY) |
1420 | goto next; |
1421 | |
1422 | ret = compare_extent_item_range(path, search_start, search_len); |
1423 | if (ret == 0) |
1424 | return ret; |
1425 | if (ret > 0) |
1426 | break; |
1427 | next: |
ret = btrfs_next_item(extent_root, path);
1429 | if (ret) { |
1430 | /* Either no more items or a fatal error. */ |
btrfs_release_path(path);
1432 | return ret; |
1433 | } |
1434 | } |
btrfs_release_path(path);
1436 | return 1; |
1437 | } |
1438 | |
1439 | static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret, |
1440 | u64 *size_ret, u64 *flags_ret, u64 *generation_ret) |
1441 | { |
1442 | struct btrfs_key key; |
1443 | struct btrfs_extent_item *ei; |
1444 | |
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1446 | ASSERT(key.type == BTRFS_METADATA_ITEM_KEY || |
1447 | key.type == BTRFS_EXTENT_ITEM_KEY); |
1448 | *extent_start_ret = key.objectid; |
1449 | if (key.type == BTRFS_METADATA_ITEM_KEY) |
1450 | *size_ret = path->nodes[0]->fs_info->nodesize; |
1451 | else |
1452 | *size_ret = key.offset; |
1453 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); |
*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1456 | } |
1457 | |
1458 | static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, |
1459 | u64 physical, u64 physical_end) |
1460 | { |
1461 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
1462 | int ret = 0; |
1463 | |
1464 | if (!btrfs_is_zoned(fs_info)) |
1465 | return 0; |
1466 | |
1467 | mutex_lock(&sctx->wr_lock); |
1468 | if (sctx->write_pointer < physical_end) { |
ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
physical,
sctx->write_pointer);
1472 | if (ret) |
1473 | btrfs_err(fs_info, |
"zoned: failed to recover write pointer");
1475 | } |
mutex_unlock(&sctx->wr_lock);
btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1478 | |
1479 | return ret; |
1480 | } |
1481 | |
1482 | static void fill_one_extent_info(struct btrfs_fs_info *fs_info, |
1483 | struct scrub_stripe *stripe, |
1484 | u64 extent_start, u64 extent_len, |
1485 | u64 extent_flags, u64 extent_gen) |
1486 | { |
1487 | for (u64 cur_logical = max(stripe->logical, extent_start); |
1488 | cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN, |
1489 | extent_start + extent_len); |
1490 | cur_logical += fs_info->sectorsize) { |
1491 | const int nr_sector = (cur_logical - stripe->logical) >> |
1492 | fs_info->sectorsize_bits; |
1493 | struct scrub_sector_verification *sector = |
1494 | &stripe->sectors[nr_sector]; |
1495 | |
set_bit(nr_sector, &stripe->extent_sector_bitmap);
1497 | if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { |
1498 | sector->is_metadata = true; |
1499 | sector->generation = extent_gen; |
1500 | } |
1501 | } |
1502 | } |
1503 | |
1504 | static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe) |
1505 | { |
1506 | stripe->extent_sector_bitmap = 0; |
1507 | stripe->init_error_bitmap = 0; |
1508 | stripe->init_nr_io_errors = 0; |
1509 | stripe->init_nr_csum_errors = 0; |
1510 | stripe->init_nr_meta_errors = 0; |
1511 | stripe->error_bitmap = 0; |
1512 | stripe->io_error_bitmap = 0; |
1513 | stripe->csum_error_bitmap = 0; |
1514 | stripe->meta_error_bitmap = 0; |
1515 | } |
1516 | |
1517 | /* |
1518 | * Locate one stripe which has at least one extent in its range. |
1519 | * |
1520 | * Return 0 if found such stripe, and store its info into @stripe. |
1521 | * Return >0 if there is no such stripe in the specified range. |
1522 | * Return <0 for error. |
1523 | */ |
1524 | static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg, |
1525 | struct btrfs_path *extent_path, |
1526 | struct btrfs_path *csum_path, |
1527 | struct btrfs_device *dev, u64 physical, |
1528 | int mirror_num, u64 logical_start, |
1529 | u32 logical_len, |
1530 | struct scrub_stripe *stripe) |
1531 | { |
1532 | struct btrfs_fs_info *fs_info = bg->fs_info; |
struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1535 | const u64 logical_end = logical_start + logical_len; |
1536 | u64 cur_logical = logical_start; |
1537 | u64 stripe_end; |
1538 | u64 extent_start; |
1539 | u64 extent_len; |
1540 | u64 extent_flags; |
1541 | u64 extent_gen; |
1542 | int ret; |
1543 | |
1544 | memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * |
1545 | stripe->nr_sectors); |
1546 | scrub_stripe_reset_bitmaps(stripe); |
1547 | |
1548 | /* The range must be inside the bg. */ |
1549 | ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); |
1550 | |
ret = find_first_extent_item(extent_root, extent_path, logical_start,
logical_len);
/* Either error or not found. */
if (ret)
goto out;
get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
&extent_gen);
1558 | if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) |
1559 | stripe->nr_meta_extents++; |
1560 | if (extent_flags & BTRFS_EXTENT_FLAG_DATA) |
1561 | stripe->nr_data_extents++; |
1562 | cur_logical = max(extent_start, cur_logical); |
1563 | |
1564 | /* |
1565 | * Round down to stripe boundary. |
1566 | * |
1567 | * The extra calculation against bg->start is to handle block groups |
1568 | * whose logical bytenr is not BTRFS_STRIPE_LEN aligned. |
1569 | */ |
1570 | stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) + |
1571 | bg->start; |
1572 | stripe->physical = physical + stripe->logical - logical_start; |
1573 | stripe->dev = dev; |
1574 | stripe->bg = bg; |
1575 | stripe->mirror_num = mirror_num; |
1576 | stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1; |
1577 | |
1578 | /* Fill the first extent info into stripe->sectors[] array. */ |
1579 | fill_one_extent_info(fs_info, stripe, extent_start, extent_len, |
1580 | extent_flags, extent_gen); |
1581 | cur_logical = extent_start + extent_len; |
1582 | |
1583 | /* Fill the extent info for the remaining sectors. */ |
1584 | while (cur_logical <= stripe_end) { |
ret = find_first_extent_item(extent_root, extent_path, cur_logical,
stripe_end - cur_logical + 1);
if (ret < 0)
goto out;
if (ret > 0) {
ret = 0;
break;
}
get_extent_info(extent_path, &extent_start, &extent_len,
&extent_flags, &extent_gen);
1595 | if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) |
1596 | stripe->nr_meta_extents++; |
1597 | if (extent_flags & BTRFS_EXTENT_FLAG_DATA) |
1598 | stripe->nr_data_extents++; |
1599 | fill_one_extent_info(fs_info, stripe, extent_start, extent_len, |
1600 | extent_flags, extent_gen); |
1601 | cur_logical = extent_start + extent_len; |
1602 | } |
1603 | |
1604 | /* Now fill the data csum. */ |
1605 | if (bg->flags & BTRFS_BLOCK_GROUP_DATA) { |
1606 | int sector_nr; |
1607 | unsigned long csum_bitmap = 0; |
1608 | |
1609 | /* Csum space should have already been allocated. */ |
1610 | ASSERT(stripe->csums); |
1611 | |
1612 | /* |
1613 | * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN |
1614 | * should contain at most 16 sectors. |
1615 | */ |
1616 | ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); |
1617 | |
ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
stripe->logical, stripe_end,
stripe->csums, &csum_bitmap);
1621 | if (ret < 0) |
1622 | goto out; |
1623 | if (ret > 0) |
1624 | ret = 0; |
1625 | |
1626 | for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) { |
1627 | stripe->sectors[sector_nr].csum = stripe->csums + |
1628 | sector_nr * fs_info->csum_size; |
1629 | } |
1630 | } |
set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1632 | out: |
1633 | return ret; |
1634 | } |
1635 | |
1636 | static void scrub_reset_stripe(struct scrub_stripe *stripe) |
1637 | { |
1638 | scrub_stripe_reset_bitmaps(stripe); |
1639 | |
1640 | stripe->nr_meta_extents = 0; |
1641 | stripe->nr_data_extents = 0; |
1642 | stripe->state = 0; |
1643 | |
1644 | for (int i = 0; i < stripe->nr_sectors; i++) { |
1645 | stripe->sectors[i].is_metadata = false; |
1646 | stripe->sectors[i].csum = NULL; |
1647 | stripe->sectors[i].generation = 0; |
1648 | } |
1649 | } |
1650 | |
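/*
 * Submit read bios covering only the sectors that have extents.
 *
 * A new bio is started whenever the previous sector is not part of an
 * extent or the current bio has reached the length returned by
 * btrfs_map_block(), so each bio stays within one mapped stripe.  Used
 * instead of a single whole-stripe read when the RAID stripe tree is
 * involved.
 */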
1651 | static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx, |
1652 | struct scrub_stripe *stripe) |
1653 | { |
1654 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
1655 | struct btrfs_bio *bbio = NULL; |
1656 | unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + |
1657 | stripe->bg->length - stripe->logical) >> |
1658 | fs_info->sectorsize_bits; |
1659 | u64 stripe_len = BTRFS_STRIPE_LEN; |
1660 | int mirror = stripe->mirror_num; |
1661 | int i; |
1662 | |
atomic_inc(&stripe->pending_io);

for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
struct page *page = scrub_stripe_get_page(stripe, i);
unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
1668 | |
1669 | /* We're beyond the chunk boundary, no need to read anymore. */ |
1670 | if (i >= nr_sectors) |
1671 | break; |
1672 | |
1673 | /* The current sector cannot be merged, submit the bio. */ |
1674 | if (bbio && |
1675 | ((i > 0 && |
1676 | !test_bit(i - 1, &stripe->extent_sector_bitmap)) || |
1677 | bbio->bio.bi_iter.bi_size >= stripe_len)) { |
1678 | ASSERT(bbio->bio.bi_iter.bi_size); |
atomic_inc(&stripe->pending_io);
btrfs_submit_bio(bbio, mirror);
1681 | bbio = NULL; |
1682 | } |
1683 | |
1684 | if (!bbio) { |
1685 | struct btrfs_io_stripe io_stripe = {}; |
1686 | struct btrfs_io_context *bioc = NULL; |
1687 | const u64 logical = stripe->logical + |
1688 | (i << fs_info->sectorsize_bits); |
1689 | int err; |
1690 | |
bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
fs_info, scrub_read_endio, stripe);
bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;

io_stripe.is_scrub = true;
err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
&stripe_len, &bioc, &io_stripe,
&mirror);
btrfs_put_bioc(bioc);
if (err) {
btrfs_bio_end_io(bbio,
errno_to_blk_status(err));
return;
}
}

__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1708 | } |
1709 | |
1710 | if (bbio) { |
1711 | ASSERT(bbio->bio.bi_iter.bi_size); |
atomic_inc(&stripe->pending_io);
btrfs_submit_bio(bbio, mirror);
}

if (atomic_dec_and_test(&stripe->pending_io)) {
wake_up(&stripe->io_wait);
INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1720 | } |
1721 | } |
1722 | |
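/*
 * Submit the initial read for a stripe: read the whole range inside the
 * chunk boundary with one bio, or fall back to per-extent reads when a
 * RAID stripe tree update is needed.  For dev-replace we may switch to
 * another mirror if the source device must be avoided or is missing.
 */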
1723 | static void scrub_submit_initial_read(struct scrub_ctx *sctx, |
1724 | struct scrub_stripe *stripe) |
1725 | { |
1726 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
1727 | struct btrfs_bio *bbio; |
1728 | unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + |
1729 | stripe->bg->length - stripe->logical) >> |
1730 | fs_info->sectorsize_bits; |
1731 | int mirror = stripe->mirror_num; |
1732 | |
1733 | ASSERT(stripe->bg); |
1734 | ASSERT(stripe->mirror_num > 0); |
1735 | ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); |
1736 | |
if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
1738 | scrub_submit_extent_sector_read(sctx, stripe); |
1739 | return; |
1740 | } |
1741 | |
bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
scrub_read_endio, stripe);
1744 | |
1745 | bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; |
1746 | /* Read the whole range inside the chunk boundary. */ |
1747 | for (unsigned int cur = 0; cur < nr_sectors; cur++) { |
struct page *page = scrub_stripe_get_page(stripe, cur);
unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
int ret;

ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
/* We should have allocated enough bio vectors. */
ASSERT(ret == fs_info->sectorsize);
}
atomic_inc(&stripe->pending_io);
1757 | |
1758 | /* |
* For dev-replace, if the user asks to avoid the source dev, or
* the device is missing, we try the next mirror instead.
1761 | */ |
1762 | if (sctx->is_dev_replace && |
1763 | (fs_info->dev_replace.cont_reading_from_srcdev_mode == |
1764 | BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID || |
1765 | !stripe->dev->bdev)) { |
int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length);

mirror = calc_next_mirror(mirror, num_copies);
}
btrfs_submit_bio(bbio, mirror);
1772 | } |
1773 | |
1774 | static bool stripe_has_metadata_error(struct scrub_stripe *stripe) |
1775 | { |
1776 | int i; |
1777 | |
1778 | for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) { |
1779 | if (stripe->sectors[i].is_metadata) { |
1780 | struct btrfs_fs_info *fs_info = stripe->bg->fs_info; |
1781 | |
1782 | btrfs_err(fs_info, |
1783 | "stripe %llu has unrepaired metadata sector at %llu" , |
1784 | stripe->logical, |
1785 | stripe->logical + (i << fs_info->sectorsize_bits)); |
1786 | return true; |
1787 | } |
1788 | } |
1789 | return false; |
1790 | } |
1791 | |
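/*
 * Submit the initial reads for one group of consecutive stripe slots,
 * throttled against the device and inside a blk plug so the bios can be
 * merged.
 */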
1792 | static void submit_initial_group_read(struct scrub_ctx *sctx, |
1793 | unsigned int first_slot, |
1794 | unsigned int nr_stripes) |
1795 | { |
1796 | struct blk_plug plug; |
1797 | |
1798 | ASSERT(first_slot < SCRUB_TOTAL_STRIPES); |
1799 | ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES); |
1800 | |
scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
btrfs_stripe_nr_to_offset(nr_stripes));
1803 | blk_start_plug(&plug); |
1804 | for (int i = 0; i < nr_stripes; i++) { |
1805 | struct scrub_stripe *stripe = &sctx->stripes[first_slot + i]; |
1806 | |
1807 | /* Those stripes should be initialized. */ |
1808 | ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); |
1809 | scrub_submit_initial_read(sctx, stripe); |
1810 | } |
1811 | blk_finish_plug(&plug); |
1812 | } |
1813 | |
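/*
 * Submit any queued-but-unsubmitted stripes, wait for all read/repair work
 * to finish, write the good sectors to the target device for dev-replace,
 * and finally reset all the stripe slots.
 */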
1814 | static int flush_scrub_stripes(struct scrub_ctx *sctx) |
1815 | { |
1816 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
1817 | struct scrub_stripe *stripe; |
1818 | const int nr_stripes = sctx->cur_stripe; |
1819 | int ret = 0; |
1820 | |
1821 | if (!nr_stripes) |
1822 | return 0; |
1823 | |
1824 | ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); |
1825 | |
1826 | /* Submit the stripes which are populated but not submitted. */ |
1827 | if (nr_stripes % SCRUB_STRIPES_PER_GROUP) { |
1828 | const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP); |
1829 | |
submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1831 | } |
1832 | |
1833 | for (int i = 0; i < nr_stripes; i++) { |
1834 | stripe = &sctx->stripes[i]; |
1835 | |
1836 | wait_event(stripe->repair_wait, |
1837 | test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); |
1838 | } |
1839 | |
1840 | /* Submit for dev-replace. */ |
1841 | if (sctx->is_dev_replace) { |
1842 | /* |
1843 | * For dev-replace, if we know there is something wrong with |
1844 | * metadata, we should immediately abort. |
1845 | */ |
1846 | for (int i = 0; i < nr_stripes; i++) { |
if (stripe_has_metadata_error(&sctx->stripes[i])) {
1848 | ret = -EIO; |
1849 | goto out; |
1850 | } |
1851 | } |
1852 | for (int i = 0; i < nr_stripes; i++) { |
1853 | unsigned long good; |
1854 | |
1855 | stripe = &sctx->stripes[i]; |
1856 | |
1857 | ASSERT(stripe->dev == fs_info->dev_replace.srcdev); |
1858 | |
bitmap_andnot(&good, &stripe->extent_sector_bitmap,
&stripe->error_bitmap, stripe->nr_sectors);
scrub_write_sectors(sctx, stripe, good, true);
1862 | } |
1863 | } |
1864 | |
1865 | /* Wait for the above writebacks to finish. */ |
1866 | for (int i = 0; i < nr_stripes; i++) { |
1867 | stripe = &sctx->stripes[i]; |
1868 | |
1869 | wait_scrub_stripe_io(stripe); |
1870 | scrub_reset_stripe(stripe); |
1871 | } |
1872 | out: |
1873 | sctx->cur_stripe = 0; |
1874 | return ret; |
1875 | } |
1876 | |
1877 | static void raid56_scrub_wait_endio(struct bio *bio) |
1878 | { |
1879 | complete(bio->bi_private); |
1880 | } |
1881 | |
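/*
 * Queue the next stripe containing an extent in [@logical, @logical +
 * @length) into the scrub context.  Submits a group once it becomes full
 * and flushes everything when the last slot gets used.  Returns >0 if no
 * more extents are found in the range.
 */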
1882 | static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, |
1883 | struct btrfs_device *dev, int mirror_num, |
1884 | u64 logical, u32 length, u64 physical, |
1885 | u64 *found_logical_ret) |
1886 | { |
1887 | struct scrub_stripe *stripe; |
1888 | int ret; |
1889 | |
1890 | /* |
* There should always be one slot left, as the caller that fills the
* last slot is expected to flush them all.
1893 | */ |
1894 | ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES); |
1895 | |
1896 | /* @found_logical_ret must be specified. */ |
1897 | ASSERT(found_logical_ret); |
1898 | |
1899 | stripe = &sctx->stripes[sctx->cur_stripe]; |
1900 | scrub_reset_stripe(stripe); |
ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
&sctx->csum_path, dev, physical,
mirror_num, logical, length, stripe);
1904 | /* Either >0 as no more extents or <0 for error. */ |
1905 | if (ret) |
1906 | return ret; |
1907 | *found_logical_ret = stripe->logical; |
1908 | sctx->cur_stripe++; |
1909 | |
1910 | /* We filled one group, submit it. */ |
1911 | if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) { |
1912 | const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP; |
1913 | |
1914 | submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP); |
1915 | } |
1916 | |
1917 | /* Last slot used, flush them all. */ |
1918 | if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES) |
1919 | return flush_scrub_stripes(sctx); |
1920 | return 0; |
1921 | } |
1922 | |
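/*
 * Scrub the P/Q stripes of one RAID56 full stripe.
 *
 * All data stripes are read (and repaired if needed) first, then fed as a
 * cache into a scrub rbio so the parity can be verified and regenerated
 * without re-reading the data from disk.
 */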
1923 | static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, |
1924 | struct btrfs_device *scrub_dev, |
1925 | struct btrfs_block_group *bg, |
1926 | struct btrfs_chunk_map *map, |
1927 | u64 full_stripe_start) |
1928 | { |
1929 | DECLARE_COMPLETION_ONSTACK(io_done); |
1930 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
1931 | struct btrfs_raid_bio *rbio; |
1932 | struct btrfs_io_context *bioc = NULL; |
1933 | struct btrfs_path extent_path = { 0 }; |
1934 | struct btrfs_path csum_path = { 0 }; |
1935 | struct bio *bio; |
1936 | struct scrub_stripe *stripe; |
1937 | bool all_empty = true; |
1938 | const int data_stripes = nr_data_stripes(map); |
1939 | unsigned long extent_bitmap = 0; |
u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1941 | int ret; |
1942 | |
1943 | ASSERT(sctx->raid56_data_stripes); |
1944 | |
1945 | /* |
* For the data stripe search, we cannot reuse the same extent/csum
* paths, as the data stripe bytenr may be smaller than the previous
* extent. Thus we have to use our own extent/csum paths.
1949 | */ |
1950 | extent_path.search_commit_root = 1; |
1951 | extent_path.skip_locking = 1; |
1952 | csum_path.search_commit_root = 1; |
1953 | csum_path.skip_locking = 1; |
1954 | |
1955 | for (int i = 0; i < data_stripes; i++) { |
1956 | int stripe_index; |
1957 | int rot; |
1958 | u64 physical; |
1959 | |
1960 | stripe = &sctx->raid56_data_stripes[i]; |
rot = div_u64(full_stripe_start - bg->start,
data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
stripe_index = (i + rot) % map->num_stripes;
physical = map->stripes[stripe_index].physical +
btrfs_stripe_nr_to_offset(rot);

scrub_reset_stripe(stripe);
set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
map->stripes[stripe_index].dev, physical, 1,
full_stripe_start + btrfs_stripe_nr_to_offset(i),
BTRFS_STRIPE_LEN, stripe);
1973 | if (ret < 0) |
1974 | goto out; |
1975 | /* |
* No extent in this data stripe, need to manually mark it
* initialized to make later read submission happy.
*/
if (ret > 0) {
stripe->logical = full_stripe_start +
btrfs_stripe_nr_to_offset(i);
stripe->dev = map->stripes[stripe_index].dev;
stripe->mirror_num = 1;
set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1985 | } |
1986 | } |
1987 | |
1988 | /* Check if all data stripes are empty. */ |
1989 | for (int i = 0; i < data_stripes; i++) { |
1990 | stripe = &sctx->raid56_data_stripes[i]; |
if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1992 | all_empty = false; |
1993 | break; |
1994 | } |
1995 | } |
1996 | if (all_empty) { |
1997 | ret = 0; |
1998 | goto out; |
1999 | } |
2000 | |
2001 | for (int i = 0; i < data_stripes; i++) { |
2002 | stripe = &sctx->raid56_data_stripes[i]; |
2003 | scrub_submit_initial_read(sctx, stripe); |
2004 | } |
2005 | for (int i = 0; i < data_stripes; i++) { |
2006 | stripe = &sctx->raid56_data_stripes[i]; |
2007 | |
2008 | wait_event(stripe->repair_wait, |
2009 | test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); |
2010 | } |
2011 | /* For now, no zoned support for RAID56. */ |
2012 | ASSERT(!btrfs_is_zoned(sctx->fs_info)); |
2013 | |
2014 | /* |
* Now all data stripes are properly verified. Check if we have any
* unrepaired sectors; if so, abort immediately or we could further
* corrupt the P/Q stripes.
2018 | * |
2019 | * During the loop, also populate extent_bitmap. |
2020 | */ |
2021 | for (int i = 0; i < data_stripes; i++) { |
2022 | unsigned long error; |
2023 | |
2024 | stripe = &sctx->raid56_data_stripes[i]; |
2025 | |
2026 | /* |
* We should only check the errors where there is an extent,
* as we may hit an empty data stripe while it's missing.
*/
bitmap_and(&error, &stripe->error_bitmap,
&stripe->extent_sector_bitmap, stripe->nr_sectors);
if (!bitmap_empty(&error, stripe->nr_sectors)) {
btrfs_err(fs_info,
"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
2035 | full_stripe_start, i, stripe->nr_sectors, |
2036 | &error); |
2037 | ret = -EIO; |
2038 | goto out; |
2039 | } |
bitmap_or(&extent_bitmap, &extent_bitmap,
&stripe->extent_sector_bitmap, stripe->nr_sectors);
2042 | } |
2043 | |
2044 | /* Now we can check and regenerate the P/Q stripe. */ |
bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
2046 | bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT; |
2047 | bio->bi_private = &io_done; |
2048 | bio->bi_end_io = raid56_scrub_wait_endio; |
2049 | |
2050 | btrfs_bio_counter_inc_blocked(fs_info); |
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
&length, &bioc, NULL, NULL);
2053 | if (ret < 0) { |
2054 | btrfs_put_bioc(bioc); |
2055 | btrfs_bio_counter_dec(fs_info); |
2056 | goto out; |
2057 | } |
rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
2060 | btrfs_put_bioc(bioc); |
2061 | if (!rbio) { |
2062 | ret = -ENOMEM; |
2063 | btrfs_bio_counter_dec(fs_info); |
2064 | goto out; |
2065 | } |
/* Use the recovered stripes as cache to avoid reading them from disk again. */
2067 | for (int i = 0; i < data_stripes; i++) { |
2068 | stripe = &sctx->raid56_data_stripes[i]; |
2069 | |
raid56_parity_cache_data_pages(rbio, stripe->pages,
full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
2072 | } |
2073 | raid56_parity_submit_scrub_rbio(rbio); |
2074 | wait_for_completion_io(&io_done); |
ret = blk_status_to_errno(bio->bi_status);
2076 | bio_put(bio); |
2077 | btrfs_bio_counter_dec(fs_info); |
2078 | |
btrfs_release_path(&extent_path);
btrfs_release_path(&csum_path);
2081 | out: |
2082 | return ret; |
2083 | } |
2084 | |
2085 | /* |
* Scrub one range which can only have a simple mirror based profile
* (including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
* RAID0/RAID10).
*
* Since we may need to handle a subset of a block group, we need the
* @logical_start and @logical_length parameters.
2092 | */ |
2093 | static int scrub_simple_mirror(struct scrub_ctx *sctx, |
2094 | struct btrfs_block_group *bg, |
2095 | struct btrfs_chunk_map *map, |
2096 | u64 logical_start, u64 logical_length, |
2097 | struct btrfs_device *device, |
2098 | u64 physical, int mirror_num) |
2099 | { |
2100 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
2101 | const u64 logical_end = logical_start + logical_length; |
2102 | u64 cur_logical = logical_start; |
2103 | int ret; |
2104 | |
2105 | /* The range must be inside the bg */ |
2106 | ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); |
2107 | |
/* Go through each extent item inside the logical range */
2109 | while (cur_logical < logical_end) { |
2110 | u64 found_logical = U64_MAX; |
2111 | u64 cur_physical = physical + cur_logical - logical_start; |
2112 | |
2113 | /* Canceled? */ |
if (atomic_read(&fs_info->scrub_cancel_req) ||
atomic_read(&sctx->cancel_req)) {
2116 | ret = -ECANCELED; |
2117 | break; |
2118 | } |
2119 | /* Paused? */ |
if (atomic_read(&fs_info->scrub_pause_req)) {
2121 | /* Push queued extents */ |
2122 | scrub_blocked_if_needed(fs_info); |
2123 | } |
2124 | /* Block group removed? */ |
spin_lock(&bg->lock);
if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
spin_unlock(&bg->lock);
ret = 0;
break;
}
spin_unlock(&bg->lock);

ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
cur_logical, logical_end - cur_logical,
cur_physical, &found_logical);
2136 | if (ret > 0) { |
2137 | /* No more extent, just update the accounting */ |
2138 | sctx->stat.last_physical = physical + logical_length; |
2139 | ret = 0; |
2140 | break; |
2141 | } |
2142 | if (ret < 0) |
2143 | break; |
2144 | |
2145 | /* queue_scrub_stripe() returned 0, @found_logical must be updated. */ |
2146 | ASSERT(found_logical != U64_MAX); |
2147 | cur_logical = found_logical + BTRFS_STRIPE_LEN; |
2148 | |
/* Don't hold the CPU for too long */
2150 | cond_resched(); |
2151 | } |
2152 | return ret; |
2153 | } |
2154 | |
2155 | /* Calculate the full stripe length for simple stripe based profiles */ |
2156 | static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map) |
2157 | { |
2158 | ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | |
2159 | BTRFS_BLOCK_GROUP_RAID10)); |
2160 | |
return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2162 | } |
2163 | |
2164 | /* Get the logical bytenr for the stripe */ |
2165 | static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map, |
2166 | struct btrfs_block_group *bg, |
2167 | int stripe_index) |
2168 | { |
2169 | ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | |
2170 | BTRFS_BLOCK_GROUP_RAID10)); |
2171 | ASSERT(stripe_index < map->num_stripes); |
2172 | |
2173 | /* |
2174 | * (stripe_index / sub_stripes) gives how many data stripes we need to |
2175 | * skip. |
2176 | */ |
return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
bg->start;
2179 | } |
2180 | |
2181 | /* Get the mirror number for the stripe */ |
2182 | static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index) |
2183 | { |
2184 | ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | |
2185 | BTRFS_BLOCK_GROUP_RAID10)); |
2186 | ASSERT(stripe_index < map->num_stripes); |
2187 | |
2188 | /* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... */ |
2189 | return stripe_index % map->sub_stripes + 1; |
2190 | } |
2191 | |
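/*
 * Scrub all the stripes of a RAID0/RAID10 chunk that live on the given
 * device, handling each BTRFS_STRIPE_LEN range as a simple mirror.
 */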
2192 | static int scrub_simple_stripe(struct scrub_ctx *sctx, |
2193 | struct btrfs_block_group *bg, |
2194 | struct btrfs_chunk_map *map, |
2195 | struct btrfs_device *device, |
2196 | int stripe_index) |
2197 | { |
2198 | const u64 logical_increment = simple_stripe_full_stripe_len(map); |
2199 | const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index); |
2200 | const u64 orig_physical = map->stripes[stripe_index].physical; |
2201 | const int mirror_num = simple_stripe_mirror_num(map, stripe_index); |
2202 | u64 cur_logical = orig_logical; |
2203 | u64 cur_physical = orig_physical; |
2204 | int ret = 0; |
2205 | |
2206 | while (cur_logical < bg->start + bg->length) { |
2207 | /* |
2208 | * Inside each stripe, RAID0 is just SINGLE, and RAID10 is |
2209 | * just RAID1, so we can reuse scrub_simple_mirror() to scrub |
2210 | * this stripe. |
2211 | */ |
ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
BTRFS_STRIPE_LEN, device, cur_physical,
mirror_num);
2215 | if (ret) |
2216 | return ret; |
2217 | /* Skip to next stripe which belongs to the target device */ |
2218 | cur_logical += logical_increment; |
2219 | /* For physical offset, we just go to next stripe */ |
2220 | cur_physical += BTRFS_STRIPE_LEN; |
2221 | } |
2222 | return ret; |
2223 | } |
2224 | |
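/*
 * Scrub one device stripe of a chunk, dispatching to the proper helper
 * based on the chunk profile: simple mirrors, RAID0/RAID10 stripes, or the
 * RAID56 path which walks data stripes and full-stripe parity separately.
 */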
2225 | static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, |
2226 | struct btrfs_block_group *bg, |
2227 | struct btrfs_chunk_map *map, |
2228 | struct btrfs_device *scrub_dev, |
2229 | int stripe_index) |
2230 | { |
2231 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
2232 | const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; |
2233 | const u64 chunk_logical = bg->start; |
2234 | int ret; |
2235 | int ret2; |
2236 | u64 physical = map->stripes[stripe_index].physical; |
2237 | const u64 dev_stripe_len = btrfs_calc_stripe_length(map); |
2238 | const u64 physical_end = physical + dev_stripe_len; |
2239 | u64 logical; |
2240 | u64 logic_end; |
2241 | /* The logical increment after finishing one stripe */ |
2242 | u64 increment; |
2243 | /* Offset inside the chunk */ |
2244 | u64 offset; |
2245 | u64 stripe_logical; |
2246 | int stop_loop = 0; |
2247 | |
2248 | /* Extent_path should be released by now. */ |
2249 | ASSERT(sctx->extent_path.nodes[0] == NULL); |
2250 | |
2251 | scrub_blocked_if_needed(fs_info); |
2252 | |
2253 | if (sctx->is_dev_replace && |
btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
mutex_lock(&sctx->wr_lock);
sctx->write_pointer = physical;
mutex_unlock(&sctx->wr_lock);
2258 | } |
2259 | |
2260 | /* Prepare the extra data stripes used by RAID56. */ |
2261 | if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
2262 | ASSERT(sctx->raid56_data_stripes == NULL); |
2263 | |
sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
sizeof(struct scrub_stripe),
GFP_KERNEL);
2267 | if (!sctx->raid56_data_stripes) { |
2268 | ret = -ENOMEM; |
2269 | goto out; |
2270 | } |
2271 | for (int i = 0; i < nr_data_stripes(map); i++) { |
ret = init_scrub_stripe(fs_info,
&sctx->raid56_data_stripes[i]);
2274 | if (ret < 0) |
2275 | goto out; |
2276 | sctx->raid56_data_stripes[i].bg = bg; |
2277 | sctx->raid56_data_stripes[i].sctx = sctx; |
2278 | } |
2279 | } |
2280 | /* |
* There used to be a big double loop to handle all profiles using the
* same routine, which grew larger and more convoluted over time.
*
* So here we handle each profile differently, so that simpler profiles
* have a simpler scrubbing function.
2286 | */ |
2287 | if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 | |
2288 | BTRFS_BLOCK_GROUP_RAID56_MASK))) { |
2289 | /* |
* The above check rules out all complex profiles, the remaining
* profiles are SINGLE|DUP|RAID1|RAID1C*, which is simple
* mirrored duplication without striping.
*
* Only @physical and @mirror_num need to be calculated using
* @stripe_index.
*/
ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
scrub_dev, map->stripes[stripe_index].physical,
stripe_index + 1);
2300 | offset = 0; |
2301 | goto out; |
2302 | } |
2303 | if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { |
ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2306 | goto out; |
2307 | } |
2308 | |
2309 | /* Only RAID56 goes through the old code */ |
2310 | ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); |
2311 | ret = 0; |
2312 | |
2313 | /* Calculate the logical end of the stripe */ |
get_raid56_logic_offset(physical_end, stripe_index,
map, &logic_end, NULL);
logic_end += chunk_logical;

/* Initialize @offset in case we need to go to out: label */
get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
2321 | |
2322 | /* |
2323 | * Due to the rotation, for RAID56 it's better to iterate each stripe |
2324 | * using their physical offset. |
2325 | */ |
2326 | while (physical < physical_end) { |
ret = get_raid56_logic_offset(physical, stripe_index, map,
&logical, &stripe_logical);
logical += chunk_logical;
if (ret) {
/* It is a parity stripe */
stripe_logical += chunk_logical;
ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
map, stripe_logical);
2335 | if (ret) |
2336 | goto out; |
2337 | goto next; |
2338 | } |
2339 | |
2340 | /* |
* Now we're at a data stripe, scrub each extent in the range.
*
* At this stage, if we ignore the repair part, inside each data
* stripe it is no different than the SINGLE profile.
* We can reuse scrub_simple_mirror() here, as the repair part
* is still based on @mirror_num.
*/
ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
scrub_dev, physical, 1);
2350 | if (ret < 0) |
2351 | goto out; |
2352 | next: |
2353 | logical += increment; |
2354 | physical += BTRFS_STRIPE_LEN; |
spin_lock(&sctx->stat_lock);
if (stop_loop)
sctx->stat.last_physical =
map->stripes[stripe_index].physical + dev_stripe_len;
else
sctx->stat.last_physical = physical;
spin_unlock(&sctx->stat_lock);
2362 | if (stop_loop) |
2363 | break; |
2364 | } |
2365 | out: |
2366 | ret2 = flush_scrub_stripes(sctx); |
2367 | if (!ret) |
2368 | ret = ret2; |
btrfs_release_path(&sctx->extent_path);
btrfs_release_path(&sctx->csum_path);
2371 | |
2372 | if (sctx->raid56_data_stripes) { |
2373 | for (int i = 0; i < nr_data_stripes(map); i++) |
release_scrub_stripe(&sctx->raid56_data_stripes[i]);
kfree(sctx->raid56_data_stripes);
2376 | sctx->raid56_data_stripes = NULL; |
2377 | } |
2378 | |
2379 | if (sctx->is_dev_replace && ret >= 0) { |
2380 | int ret2; |
2381 | |
ret2 = sync_write_pointer_for_zoned(sctx,
chunk_logical + offset,
map->stripes[stripe_index].physical,
2385 | physical_end); |
2386 | if (ret2) |
2387 | ret = ret2; |
2388 | } |
2389 | |
2390 | return ret < 0 ? ret : 0; |
2391 | } |
2392 | |
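/*
 * Scrub the device extent [@dev_offset, @dev_offset + @dev_extent_len) of
 * @scrub_dev that backs the given block group, by scrubbing every chunk
 * stripe placed at that physical offset.
 */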
2393 | static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, |
2394 | struct btrfs_block_group *bg, |
2395 | struct btrfs_device *scrub_dev, |
2396 | u64 dev_offset, |
2397 | u64 dev_extent_len) |
2398 | { |
2399 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
2400 | struct btrfs_chunk_map *map; |
2401 | int i; |
2402 | int ret = 0; |
2403 | |
map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
2405 | if (!map) { |
2406 | /* |
2407 | * Might have been an unused block group deleted by the cleaner |
2408 | * kthread or relocation. |
2409 | */ |
spin_lock(&bg->lock);
if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
ret = -EINVAL;
spin_unlock(&bg->lock);
2414 | |
2415 | return ret; |
2416 | } |
2417 | if (map->start != bg->start) |
2418 | goto out; |
2419 | if (map->chunk_len < dev_extent_len) |
2420 | goto out; |
2421 | |
2422 | for (i = 0; i < map->num_stripes; ++i) { |
2423 | if (map->stripes[i].dev->bdev == scrub_dev->bdev && |
2424 | map->stripes[i].physical == dev_offset) { |
ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
2426 | if (ret) |
2427 | goto out; |
2428 | } |
2429 | } |
2430 | out: |
2431 | btrfs_free_chunk_map(map); |
2432 | |
2433 | return ret; |
2434 | } |
2435 | |
2436 | static int finish_extent_writes_for_zoned(struct btrfs_root *root, |
2437 | struct btrfs_block_group *cache) |
2438 | { |
2439 | struct btrfs_fs_info *fs_info = cache->fs_info; |
2440 | struct btrfs_trans_handle *trans; |
2441 | |
2442 | if (!btrfs_is_zoned(fs_info)) |
2443 | return 0; |
2444 | |
btrfs_wait_block_group_reservations(cache);
btrfs_wait_nocow_writers(cache);
btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2448 | |
2449 | trans = btrfs_join_transaction(root); |
if (IS_ERR(trans))
return PTR_ERR(trans);
2452 | return btrfs_commit_transaction(trans); |
2453 | } |
2454 | |
2455 | static noinline_for_stack |
2456 | int scrub_enumerate_chunks(struct scrub_ctx *sctx, |
2457 | struct btrfs_device *scrub_dev, u64 start, u64 end) |
2458 | { |
2459 | struct btrfs_dev_extent *dev_extent = NULL; |
2460 | struct btrfs_path *path; |
2461 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
2462 | struct btrfs_root *root = fs_info->dev_root; |
2463 | u64 chunk_offset; |
2464 | int ret = 0; |
2465 | int ro_set; |
2466 | int slot; |
2467 | struct extent_buffer *l; |
2468 | struct btrfs_key key; |
2469 | struct btrfs_key found_key; |
2470 | struct btrfs_block_group *cache; |
2471 | struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; |
2472 | |
2473 | path = btrfs_alloc_path(); |
2474 | if (!path) |
2475 | return -ENOMEM; |
2476 | |
2477 | path->reada = READA_FORWARD; |
2478 | path->search_commit_root = 1; |
2479 | path->skip_locking = 1; |
2480 | |
2481 | key.objectid = scrub_dev->devid; |
2482 | key.offset = 0ull; |
2483 | key.type = BTRFS_DEV_EXTENT_KEY; |
2484 | |
2485 | while (1) { |
2486 | u64 dev_extent_len; |
2487 | |
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2489 | if (ret < 0) |
2490 | break; |
2491 | if (ret > 0) { |
2492 | if (path->slots[0] >= |
btrfs_header_nritems(path->nodes[0])) {
2494 | ret = btrfs_next_leaf(root, path); |
2495 | if (ret < 0) |
2496 | break; |
2497 | if (ret > 0) { |
2498 | ret = 0; |
2499 | break; |
2500 | } |
2501 | } else { |
2502 | ret = 0; |
2503 | } |
2504 | } |
2505 | |
2506 | l = path->nodes[0]; |
2507 | slot = path->slots[0]; |
2508 | |
btrfs_item_key_to_cpu(l, &found_key, slot);
2510 | |
2511 | if (found_key.objectid != scrub_dev->devid) |
2512 | break; |
2513 | |
2514 | if (found_key.type != BTRFS_DEV_EXTENT_KEY) |
2515 | break; |
2516 | |
2517 | if (found_key.offset >= end) |
2518 | break; |
2519 | |
2520 | if (found_key.offset < key.offset) |
2521 | break; |
2522 | |
2523 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); |
dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2525 | |
2526 | if (found_key.offset + dev_extent_len <= start) |
2527 | goto skip; |
2528 | |
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2530 | |
2531 | /* |
2532 | * get a reference on the corresponding block group to prevent |
2533 | * the chunk from going away while we scrub it |
2534 | */ |
cache = btrfs_lookup_block_group(fs_info, chunk_offset);

/*
* Some chunks are removed but not committed to disk yet,
* continue scrubbing.
*/
2539 | if (!cache) |
2540 | goto skip; |
2541 | |
2542 | ASSERT(cache->start <= chunk_offset); |
2543 | /* |
2544 | * We are using the commit root to search for device extents, so |
2545 | * that means we could have found a device extent item from a |
2546 | * block group that was deleted in the current transaction. The |
2547 | * logical start offset of the deleted block group, stored at |
2548 | * @chunk_offset, might be part of the logical address range of |
2549 | * a new block group (which uses different physical extents). |
2550 | * In this case btrfs_lookup_block_group() has returned the new |
2551 | * block group, and its start address is less than @chunk_offset. |
2552 | * |
2553 | * We skip such new block groups, because it's pointless to |
2554 | * process them, as we won't find their extents because we search |
2555 | * for them using the commit root of the extent tree. For a device |
2556 | * replace it's also fine to skip it, we won't miss copying them |
2557 | * to the target device because we have the write duplication |
2558 | * setup through the regular write path (by btrfs_map_block()), |
2559 | * and we have committed a transaction when we started the device |
2560 | * replace, right after setting up the device replace state. |
2561 | */ |
2562 | if (cache->start < chunk_offset) { |
2563 | btrfs_put_block_group(cache); |
2564 | goto skip; |
2565 | } |
2566 | |
2567 | if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { |
2568 | if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { |
2569 | btrfs_put_block_group(cache); |
2570 | goto skip; |
2571 | } |
2572 | } |
2573 | |
2574 | /* |
2575 | * Make sure that while we are scrubbing the corresponding block |
2576 | * group doesn't get its logical address and its device extents |
2577 | * reused for another block group, which can possibly be of a |
2578 | * different type and different profile. We do this to prevent |
2579 | * false error detections and crashes due to bogus attempts to |
2580 | * repair extents. |
2581 | */ |
spin_lock(&cache->lock);
if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
spin_unlock(&cache->lock);
2585 | btrfs_put_block_group(cache); |
2586 | goto skip; |
2587 | } |
2588 | btrfs_freeze_block_group(cache); |
spin_unlock(&cache->lock);

/*
* We need to call btrfs_inc_block_group_ro() with scrubs_paused,
2593 | * to avoid deadlock caused by: |
2594 | * btrfs_inc_block_group_ro() |
2595 | * -> btrfs_wait_for_commit() |
2596 | * -> btrfs_commit_transaction() |
2597 | * -> btrfs_scrub_pause() |
2598 | */ |
2599 | scrub_pause_on(fs_info); |
2600 | |
2601 | /* |
2602 | * Don't do chunk preallocation for scrub. |
2603 | * |
2604 | * This is especially important for SYSTEM bgs, or we can hit |
2605 | * -EFBIG from btrfs_finish_chunk_alloc() like: |
2606 | * 1. The only SYSTEM bg is marked RO. |
2607 | * Since SYSTEM bg is small, that's pretty common. |
2608 | * 2. New SYSTEM bg will be allocated |
2609 | * Due to regular version will allocate new chunk. |
2610 | * 3. New SYSTEM bg is empty and will get cleaned up |
2611 | * Before cleanup really happens, it's marked RO again. |
2612 | * 4. Empty SYSTEM bg get scrubbed |
2613 | * We go back to 2. |
2614 | * |
2615 | * This can easily boost the amount of SYSTEM chunks if cleaner |
2616 | * thread can't be triggered fast enough, and use up all space |
2617 | * of btrfs_super_block::sys_chunk_array |
2618 | * |
2619 | * While for dev replace, we need to try our best to mark block |
2620 | * group RO, to prevent race between: |
2621 | * - Write duplication |
2622 | * Contains latest data |
2623 | * - Scrub copy |
2624 | * Contains data from commit tree |
2625 | * |
2626 | * If target block group is not marked RO, nocow writes can |
2627 | * be overwritten by scrub copy, causing data corruption. |
2628 | * So for dev-replace, it's not allowed to continue if a block |
2629 | * group is not RO. |
2630 | */ |
ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2632 | if (!ret && sctx->is_dev_replace) { |
2633 | ret = finish_extent_writes_for_zoned(root, cache); |
2634 | if (ret) { |
2635 | btrfs_dec_block_group_ro(cache); |
2636 | scrub_pause_off(fs_info); |
2637 | btrfs_put_block_group(cache); |
2638 | break; |
2639 | } |
2640 | } |
2641 | |
2642 | if (ret == 0) { |
2643 | ro_set = 1; |
2644 | } else if (ret == -ENOSPC && !sctx->is_dev_replace && |
2645 | !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) { |
2646 | /* |
* btrfs_inc_block_group_ro() returns -ENOSPC when it
* failed to create a new chunk for metadata.
* It is not a problem for scrub, because
* metadata is always cowed, and our scrub paused
* commit_transactions.
*
* For RAID56 chunks, we have to mark them read-only
* for scrub, as later we would use our own cache
* outside of the RAID56 realm.
* Thus we want the RAID56 bg to be marked RO to
* prevent RMW from screwing up our cache.
2658 | */ |
2659 | ro_set = 0; |
2660 | } else if (ret == -ETXTBSY) { |
2661 | btrfs_warn(fs_info, |
2662 | "skipping scrub of block group %llu due to active swapfile" , |
2663 | cache->start); |
2664 | scrub_pause_off(fs_info); |
2665 | ret = 0; |
2666 | goto skip_unfreeze; |
2667 | } else { |
2668 | btrfs_warn(fs_info, |
2669 | "failed setting block group ro: %d" , ret); |
2670 | btrfs_unfreeze_block_group(cache); |
2671 | btrfs_put_block_group(cache); |
2672 | scrub_pause_off(fs_info); |
2673 | break; |
2674 | } |
2675 | |
2676 | /* |
2677 | * Now the target block is marked RO, wait for nocow writes to |
2678 | * finish before dev-replace. |
2679 | * COW is fine, as COW never overwrites extents in commit tree. |
2680 | */ |
2681 | if (sctx->is_dev_replace) { |
btrfs_wait_nocow_writers(cache);
btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
cache->length);
2685 | } |
2686 | |
2687 | scrub_pause_off(fs_info); |
down_write(&dev_replace->rwsem);
dev_replace->cursor_right = found_key.offset + dev_extent_len;
dev_replace->cursor_left = found_key.offset;
dev_replace->item_needs_writeback = 1;
up_write(&dev_replace->rwsem);
2693 | |
ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
dev_extent_len);
if (sctx->is_dev_replace &&
!btrfs_finish_block_group_to_copy(dev_replace->srcdev,
cache, found_key.offset))
2699 | ro_set = 0; |
2700 | |
down_write(&dev_replace->rwsem);
dev_replace->cursor_left = dev_replace->cursor_right;
dev_replace->item_needs_writeback = 1;
up_write(&dev_replace->rwsem);
2705 | |
2706 | if (ro_set) |
2707 | btrfs_dec_block_group_ro(cache); |
2708 | |
2709 | /* |
2710 | * We might have prevented the cleaner kthread from deleting |
2711 | * this block group if it was already unused because we raced |
2712 | * and set it to RO mode first. So add it back to the unused |
2713 | * list, otherwise it might not ever be deleted unless a manual |
2714 | * balance is triggered or it becomes used and unused again. |
2715 | */ |
spin_lock(&cache->lock);
2717 | if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && |
2718 | !cache->ro && cache->reserved == 0 && cache->used == 0) { |
spin_unlock(&cache->lock);
if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
btrfs_discard_queue_work(&fs_info->discard_ctl,
cache);
else
btrfs_mark_bg_unused(cache);
} else {
spin_unlock(&cache->lock);
2727 | } |
2728 | skip_unfreeze: |
2729 | btrfs_unfreeze_block_group(cache); |
2730 | btrfs_put_block_group(cache); |
2731 | if (ret) |
2732 | break; |
2733 | if (sctx->is_dev_replace && |
atomic64_read(&dev_replace->num_write_errors) > 0) {
2735 | ret = -EIO; |
2736 | break; |
2737 | } |
2738 | if (sctx->stat.malloc_errors > 0) { |
2739 | ret = -ENOMEM; |
2740 | break; |
2741 | } |
2742 | skip: |
2743 | key.offset = found_key.offset + dev_extent_len; |
btrfs_release_path(path);
2745 | } |
2746 | |
btrfs_free_path(path);
2748 | |
2749 | return ret; |
2750 | } |
2751 | |
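/*
 * Read one super block copy from @physical on @dev and verify its checksum,
 * generation and general validity.  Returns 0 if it matches the expected
 * @generation and passes btrfs_validate_super().
 */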
2752 | static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev, |
2753 | struct page *page, u64 physical, u64 generation) |
2754 | { |
2755 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
2756 | struct bio_vec bvec; |
2757 | struct bio bio; |
2758 | struct btrfs_super_block *sb = page_address(page); |
2759 | int ret; |
2760 | |
bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
__bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
ret = submit_bio_wait(&bio);
2765 | bio_uninit(&bio); |
2766 | |
2767 | if (ret < 0) |
2768 | return ret; |
ret = btrfs_check_super_csum(fs_info, sb);
if (ret != 0) {
btrfs_err_rl(fs_info,
"super block at physical %llu devid %llu has bad csum",
2773 | physical, dev->devid); |
2774 | return -EIO; |
2775 | } |
if (btrfs_super_generation(sb) != generation) {
btrfs_err_rl(fs_info,
"super block at physical %llu devid %llu has bad generation %llu expect %llu",
2779 | physical, dev->devid, |
2780 | btrfs_super_generation(sb), generation); |
2781 | return -EUCLEAN; |
2782 | } |
2783 | |
return btrfs_validate_super(fs_info, sb, -1);
2785 | } |
2786 | |
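/*
 * Verify all super block copies of @scrub_dev that fit inside the committed
 * device size, accounting any failure as a super block error.  Seed devices
 * are checked against their own generation.
 */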
2787 | static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, |
2788 | struct btrfs_device *scrub_dev) |
2789 | { |
2790 | int i; |
2791 | u64 bytenr; |
2792 | u64 gen; |
2793 | int ret = 0; |
2794 | struct page *page; |
2795 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
2796 | |
2797 | if (BTRFS_FS_ERROR(fs_info)) |
2798 | return -EROFS; |
2799 | |
2800 | page = alloc_page(GFP_KERNEL); |
2801 | if (!page) { |
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
2805 | return -ENOMEM; |
2806 | } |
2807 | |
/* Seed devices of a new filesystem have their own generation. */
2809 | if (scrub_dev->fs_devices != fs_info->fs_devices) |
2810 | gen = scrub_dev->generation; |
2811 | else |
2812 | gen = btrfs_get_last_trans_committed(fs_info); |
2813 | |
2814 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { |
ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
2816 | if (ret == -ENOENT) |
2817 | break; |
2818 | |
2819 | if (ret) { |
spin_lock(&sctx->stat_lock);
sctx->stat.super_errors++;
spin_unlock(&sctx->stat_lock);
2823 | continue; |
2824 | } |
2825 | |
2826 | if (bytenr + BTRFS_SUPER_INFO_SIZE > |
2827 | scrub_dev->commit_total_bytes) |
2828 | break; |
if (!btrfs_check_super_location(scrub_dev, bytenr))
continue;

ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
if (ret) {
spin_lock(&sctx->stat_lock);
sctx->stat.super_errors++;
spin_unlock(&sctx->stat_lock);
2837 | } |
2838 | } |
2839 | __free_page(page); |
2840 | return 0; |
2841 | } |
2842 | |
2843 | static void scrub_workers_put(struct btrfs_fs_info *fs_info) |
2844 | { |
if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
&fs_info->scrub_lock)) {
struct workqueue_struct *scrub_workers = fs_info->scrub_workers;

fs_info->scrub_workers = NULL;
mutex_unlock(&fs_info->scrub_lock);

if (scrub_workers)
destroy_workqueue(scrub_workers);
2854 | } |
2855 | } |
2856 | |
2857 | /* |
* Get a reference count on fs_info->scrub_workers, starting the workers if necessary.
2859 | */ |
2860 | static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info) |
2861 | { |
2862 | struct workqueue_struct *scrub_workers = NULL; |
2863 | unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; |
2864 | int max_active = fs_info->thread_pool_size; |
2865 | int ret = -ENOMEM; |
2866 | |
if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
return 0;

scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2871 | if (!scrub_workers) |
2872 | return -ENOMEM; |
2873 | |
2874 | mutex_lock(&fs_info->scrub_lock); |
if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
ASSERT(fs_info->scrub_workers == NULL);
fs_info->scrub_workers = scrub_workers;
refcount_set(&fs_info->scrub_workers_refcnt, 1);
mutex_unlock(&fs_info->scrub_lock);
return 0;
}
/* Other thread raced in and created the workers for us */
refcount_inc(&fs_info->scrub_workers_refcnt);
mutex_unlock(&fs_info->scrub_lock);
2885 | |
2886 | ret = 0; |
2887 | |
destroy_workqueue(scrub_workers);
2889 | return ret; |
2890 | } |
2891 | |
2892 | int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, |
2893 | u64 end, struct btrfs_scrub_progress *progress, |
2894 | int readonly, int is_dev_replace) |
2895 | { |
2896 | struct btrfs_dev_lookup_args args = { .devid = devid }; |
2897 | struct scrub_ctx *sctx; |
2898 | int ret; |
2899 | struct btrfs_device *dev; |
2900 | unsigned int nofs_flag; |
2901 | bool need_commit = false; |
2902 | |
2903 | if (btrfs_fs_closing(fs_info)) |
2904 | return -EAGAIN; |
2905 | |
2906 | /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */ |
2907 | ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); |
2908 | |
2909 | /* |
2910 | * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible |
2911 | * value (max nodesize / min sectorsize), thus nodesize should always |
2912 | * be fine. |
2913 | */ |
2914 | ASSERT(fs_info->nodesize <= |
2915 | SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); |
2916 | |
2917 | /* Allocate outside of device_list_mutex */ |
2918 | sctx = scrub_setup_ctx(fs_info, is_dev_replace); |
if (IS_ERR(sctx))
return PTR_ERR(sctx);
2921 | |
2922 | ret = scrub_workers_get(fs_info); |
2923 | if (ret) |
2924 | goto out_free_ctx; |
2925 | |
2926 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
dev = btrfs_find_device(fs_info->fs_devices, &args);
if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
!is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2931 | ret = -ENODEV; |
2932 | goto out; |
2933 | } |
2934 | |
2935 | if (!is_dev_replace && !readonly && |
2936 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { |
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_err_in_rcu(fs_info,
"scrub on devid %llu: filesystem on %s is not writable",
2940 | devid, btrfs_dev_name(dev)); |
2941 | ret = -EROFS; |
2942 | goto out; |
2943 | } |
2944 | |
2945 | mutex_lock(&fs_info->scrub_lock); |
2946 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
2947 | test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { |
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2950 | ret = -EIO; |
2951 | goto out; |
2952 | } |
2953 | |
down_read(&fs_info->dev_replace.rwsem);
if (dev->scrub_ctx ||
(!is_dev_replace &&
btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
up_read(&fs_info->dev_replace.rwsem);
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -EINPROGRESS;
goto out;
}
up_read(&fs_info->dev_replace.rwsem);
2965 | |
2966 | sctx->readonly = readonly; |
2967 | dev->scrub_ctx = sctx; |
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2969 | |
2970 | /* |
* By checking @scrub_pause_req here, we can avoid a
* race between committing a transaction and scrubbing.
*/
__scrub_blocked_if_needed(fs_info);
atomic_inc(&fs_info->scrubs_running);
mutex_unlock(&fs_info->scrub_lock);
2977 | |
2978 | /* |
2979 | * In order to avoid deadlock with reclaim when there is a transaction |
2980 | * trying to pause scrub, make sure we use GFP_NOFS for all the |
2981 | * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity() |
2982 | * invoked by our callees. The pausing request is done when the |
2983 | * transaction commit starts, and it blocks the transaction until scrub |
2984 | * is paused (done at specific points at scrub_stripe() or right above |
2985 | * before incrementing fs_info->scrubs_running). |
2986 | */ |
2987 | nofs_flag = memalloc_nofs_save(); |
2988 | if (!is_dev_replace) { |
2989 | u64 old_super_errors; |
2990 | |
spin_lock(&sctx->stat_lock);
old_super_errors = sctx->stat.super_errors;
spin_unlock(&sctx->stat_lock);

btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2996 | /* |
* By holding the device list mutex, we can
* kick off writing super in log tree sync.
*/
mutex_lock(&fs_info->fs_devices->device_list_mutex);
ret = scrub_supers(sctx, dev);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);

spin_lock(&sctx->stat_lock);
/*
* Super block errors found, but we can not commit a transaction
* in the current context, since btrfs_commit_transaction() needs
* to pause the currently running scrub (held by ourselves).
3009 | */ |
3010 | if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) |
3011 | need_commit = true; |
spin_unlock(&sctx->stat_lock);
}

if (!ret)
ret = scrub_enumerate_chunks(sctx, dev, start, end);
memalloc_nofs_restore(nofs_flag);

atomic_dec(&fs_info->scrubs_running);
3020 | wake_up(&fs_info->scrub_pause_wait); |
3021 | |
3022 | if (progress) |
3023 | memcpy(progress, &sctx->stat, sizeof(*progress)); |
3024 | |
3025 | if (!is_dev_replace) |
3026 | btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d" , |
3027 | ret ? "not finished" : "finished" , devid, ret); |
3028 | |
3029 | mutex_lock(&fs_info->scrub_lock); |
3030 | dev->scrub_ctx = NULL; |
mutex_unlock(&fs_info->scrub_lock);
3032 | |
3033 | scrub_workers_put(fs_info); |
3034 | scrub_put_ctx(sctx); |
3035 | |
3036 | /* |
3037 | * We found some super block errors before, now try to force a |
3038 | * transaction commit, as scrub has finished. |
3039 | */ |
3040 | if (need_commit) { |
3041 | struct btrfs_trans_handle *trans; |
3042 | |
trans = btrfs_start_transaction(fs_info->tree_root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
btrfs_err(fs_info,
"scrub: failed to start transaction to fix super block errors: %d", ret);
3048 | return ret; |
3049 | } |
3050 | ret = btrfs_commit_transaction(trans); |
3051 | if (ret < 0) |
3052 | btrfs_err(fs_info, |
3053 | "scrub: failed to commit transaction to fix super block errors: %d" , ret); |
3054 | } |
3055 | return ret; |
3056 | out: |
3057 | scrub_workers_put(fs_info); |
3058 | out_free_ctx: |
3059 | scrub_free_ctx(sctx); |
3060 | |
3061 | return ret; |
3062 | } |
3063 | |
3064 | void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) |
3065 | { |
3066 | mutex_lock(&fs_info->scrub_lock); |
atomic_inc(&fs_info->scrub_pause_req);
while (atomic_read(&fs_info->scrubs_paused) !=
atomic_read(&fs_info->scrubs_running)) {
mutex_unlock(&fs_info->scrub_lock);
3071 | wait_event(fs_info->scrub_pause_wait, |
3072 | atomic_read(&fs_info->scrubs_paused) == |
3073 | atomic_read(&fs_info->scrubs_running)); |
3074 | mutex_lock(&fs_info->scrub_lock); |
3075 | } |
mutex_unlock(&fs_info->scrub_lock);
3077 | } |
3078 | |
3079 | void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) |
3080 | { |
atomic_dec(&fs_info->scrub_pause_req);
3082 | wake_up(&fs_info->scrub_pause_wait); |
3083 | } |
3084 | |
3085 | int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) |
3086 | { |
3087 | mutex_lock(&fs_info->scrub_lock); |
if (!atomic_read(&fs_info->scrubs_running)) {
mutex_unlock(&fs_info->scrub_lock);
3090 | return -ENOTCONN; |
3091 | } |
3092 | |
atomic_inc(&fs_info->scrub_cancel_req);
while (atomic_read(&fs_info->scrubs_running)) {
mutex_unlock(&fs_info->scrub_lock);
3096 | wait_event(fs_info->scrub_pause_wait, |
3097 | atomic_read(&fs_info->scrubs_running) == 0); |
3098 | mutex_lock(&fs_info->scrub_lock); |
3099 | } |
atomic_dec(&fs_info->scrub_cancel_req);
mutex_unlock(&fs_info->scrub_lock);
3102 | |
3103 | return 0; |
3104 | } |
3105 | |
3106 | int btrfs_scrub_cancel_dev(struct btrfs_device *dev) |
3107 | { |
3108 | struct btrfs_fs_info *fs_info = dev->fs_info; |
3109 | struct scrub_ctx *sctx; |
3110 | |
3111 | mutex_lock(&fs_info->scrub_lock); |
3112 | sctx = dev->scrub_ctx; |
3113 | if (!sctx) { |
mutex_unlock(&fs_info->scrub_lock);
3115 | return -ENOTCONN; |
3116 | } |
atomic_inc(&sctx->cancel_req);
while (dev->scrub_ctx) {
mutex_unlock(&fs_info->scrub_lock);
3120 | wait_event(fs_info->scrub_pause_wait, |
3121 | dev->scrub_ctx == NULL); |
3122 | mutex_lock(&fs_info->scrub_lock); |
3123 | } |
mutex_unlock(&fs_info->scrub_lock);
3125 | |
3126 | return 0; |
3127 | } |
3128 | |
3129 | int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, |
3130 | struct btrfs_scrub_progress *progress) |
3131 | { |
3132 | struct btrfs_dev_lookup_args args = { .devid = devid }; |
3133 | struct btrfs_device *dev; |
3134 | struct scrub_ctx *sctx = NULL; |
3135 | |
3136 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
dev = btrfs_find_device(fs_info->fs_devices, &args);
3138 | if (dev) |
3139 | sctx = dev->scrub_ctx; |
3140 | if (sctx) |
3141 | memcpy(progress, &sctx->stat, sizeof(*progress)); |
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3143 | |
3144 | return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; |
3145 | } |
3146 | |