// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
#include "inode-item.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "relocation.h"
#include "super.h"
#include "tree-checker.h"

/*
 * Relocation overview
 *
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is used by resize (shrink only), profile conversion, space compaction,
 * and the balance routine to spread chunks over devices.
 *
 * Before			| After
 * ------------------------------------------------------------------
 * BG A: 10 data extents	| BG A: deleted
 * BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 * BG C:  1 data extent		| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after the swap.
 *
 * 3.   Clean up reloc trees and the data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is the relocate_block_group() function.
 */
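
/*
 * A rough sketch of the flow described above, as a reading aid only.  The
 * names mirror functions in this file, but the pseudo code is illustrative,
 * not the exact call graph:
 *
 *	relocate_block_group()
 *		mark block group read-only		(step 1)
 *		loop over extents in the block group	(step 2.1)
 *			build backref/reloc trees	(step 2.2)
 *			relocate tree blocks and data
 *		merge_reloc_roots()			(step 2.3)
 *	clean up reloc trees and data reloc inode	(step 3)
 */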

#define RELOCATION_RESERVED_NODES	256
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represent a tree block to process
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	u64 owner;
	struct btrfs_key key;
	u8 level;
	bool key_ready;		/* true once @key holds the block's first key */
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	/* Disk bytenr range covered by the extents of this cluster. */
	u64 start;
	u64 end;
	/* Start disk bytenr of each extent in the cluster. */
	u64 boundary[MAX_EXTENTS];
	/* Number of extents in the cluster. */
	unsigned int nr;
	u64 owning_root;
};

/* Stages of data relocation. */
enum reloc_stage {
	/* Stage 1: copy the data extents to their new locations. */
	MOVE_DATA_EXTENTS,
	/* Stage 2: update file extent items to point at the new locations. */
	UPDATE_DATA_PTRS
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	enum reloc_stage stage;
	bool create_reloc_tree;
	bool merge_reloc_tree;
	bool found_file_extent;
};

static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		set_extent_bit(&rc->processed_blocks, node->bytenr,
			       node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL);
	}
	node->processed = 1;
}

/*
 * walk up backref nodes until we reach the node that represents a tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
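
/*
 * A minimal sketch of how the two walkers above combine to enumerate every
 * reference path from a node up to the tree roots referencing it
 * (illustrative only; real callers add locking and per-root processing):
 *
 *	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
 *	struct btrfs_backref_node *next = node;
 *	int index = 0;
 *
 *	while (1) {
 *		next = walk_up_backref(next, edges, &index);
 *		// 'next' is now a tree root, edges[0..index-1] is the path
 *		next = walk_down_backref(edges, &index);
 *		if (!next)
 *			break;	// all reference paths visited
 *	}
 */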

static void update_backref_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;

	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * Detached nodes are used to avoid unnecessary backref lookups.
	 * A transaction commit changes the extent tree, so the detached
	 * nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct btrfs_backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * Some nodes can be left in the pending list if there were
	 * errors during processing of the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static bool reloc_root_is_dead(const struct btrfs_root *root)
{
	/*
	 * Pairs with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root.  We need to see the updated bit before
	 * trying to access reloc_root.
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * A reloc tree after swap is considered dead, and thus not valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
 * special case.
 */
static bool have_reloc_root(const struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}

bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return false;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return true;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return false;

	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return false;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the backref
	 * node for the fs tree root is useless for relocation.
	 */
	return true;
}

/*
 * find reloc tree by address of tree root
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	ASSERT(rc);
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}

/*
 * For useless nodes, do two major clean ups:
 *
 * - Clean up the child edges and nodes
 *   If a child node is also orphan (no parent) during cleanup, then the child
 *   node will be cleaned up too.
 *
 * - Free up leaves (level 0), keep non-leaf nodes detached
 *   For non-leaf nodes, the node is still cached as "detached".
 *
 * Return false if @node is not in the @useless_nodes list.
 * Return true if @node is in the @useless_nodes list.
 */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		if (cur == node)
			ret = true;

		/* The node is the lowest node */
		if (cur->lowest) {
			list_del_init(&cur->lower);
			cur->lowest = 0;
		}

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_entry(cur->lower.next,
					  struct btrfs_backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}

/*
 * Build a backref tree for a given tree block.  The root of the backref tree
 * corresponds to the tree block, and the leaves of the backref tree
 * correspond to the roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is to check backrefs of a given block to
 * find upper level blocks that reference the block, and then check backrefs
 * of these upper level blocks recursively.  The recursion stops when a tree
 * root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
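/*
 * An illustrative sketch of the resulting structure (hypothetical layout,
 * not taken from a real filesystem): relocating tree block X reachable from
 * two subvolume trees yields a backref tree whose root is X and whose leaves
 * are the subvolume roots:
 *
 *	root of subvol A        root of subvol B	<- backref tree leaves
 *	        \                      /
 *	       intermediate tree block(s)
 *	                \            /
 *	              tree block X (bytenr)		<- backref tree root
 */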
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct btrfs_trans_handle *trans,
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
						  node_key, cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_free_path(iter->path);
	kfree(iter);
	btrfs_free_path(path);
	if (err) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}

/*
 * helper to add a backref node for the newly created snapshot.  the backref
 * node is created by cloning the backref node that corresponds to the root
 * of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      const struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->commit_root->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_err(fs_info,
			  "Duplicate root found for start=%llu while inserting into relocation tree",
			  node->bytenr);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list.  There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots.  If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because whoever is
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
				   root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = root->node->start;
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
	return 0;
}

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - this way ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) both when the relocation root is
		 * created inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
fail:
	kfree(root_item);
abort:
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}

/*
 * create a reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 *
 * The reloc_root comes out of here with two references, one for
 * root->reloc_root, and another for being on the rc->reloc_roots list.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (!rc)
		return 0;

	/*
	 * The subvolume has a reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important.  We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root.  This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees.  Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root.  We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}

/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

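	/*
	 * The data reloc inode holds the relocated copy of an extent at file
	 * offset (bytenr - index_cnt); relocation reuses index_cnt to store
	 * the base bytenr (the start of the block group being relocated), so
	 * this subtraction maps the extent's disk bytenr to the file offset
	 * of its new copy.
	 */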
	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * read_folio to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(BTRFS_I(inode));
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				struct extent_state *cached_state = NULL;

				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end,
						      &cached_state);
				if (!ret)
					continue;

				btrfs_drop_extent_map_range(BTRFS_I(inode),
							    key.offset, end, true);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, &cached_state);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent, root->root_key.objectid);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent, root->root_key.objectid);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(trans, leaf);
	if (inode)
		btrfs_add_delayed_iput(BTRFS_I(inode));
	return ret;
}

static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
					       int slot, const struct btrfs_path *path,
					       int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;

	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, the level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, 0, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							 path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Inform qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(trans, parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(trans, path->nodes[level]);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start,
				       src->root_key.objectid);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0, dest->root_key.objectid);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
				    true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		/* We don't know the real owning_root, use 0. */
		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start, 0);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		/* We don't know the real owning_root, use 0. */
		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
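
/*
 * A minimal sketch of how the two walkers above are driven (illustrative
 * only; the real loop in merge_reloc_root() adds transaction, reservation
 * and error handling):
 *
 *	while (1) {
 *		ret = walk_down_reloc_tree(reloc_root, path, &level);
 *		if (ret)		// no more relocated blocks
 *			break;
 *		// replace the block at 'level' in the fs tree, then
 *		ret = walk_up_reloc_tree(reloc_root, path, &level);
 *		if (ret)
 *			break;
 *	}
 */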

/*
 * invalidate extent cache for file extents whose keys are in the range
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   const struct btrfs_key *min_key,
				   const struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		struct extent_state *cached_state = NULL;

		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
		btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}

static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL.  Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot() drops the ref we hold
				 * for ->reloc_root.  If it fails however we
				 * must drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}

1688 | /* |
1689 | * merge the relocated tree blocks in reloc tree with corresponding |
1690 | * fs tree. |
1691 | */ |
1692 | static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, |
1693 | struct btrfs_root *root) |
1694 | { |
1695 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
1696 | struct btrfs_key key; |
1697 | struct btrfs_key next_key; |
1698 | struct btrfs_trans_handle *trans = NULL; |
1699 | struct btrfs_root *reloc_root; |
1700 | struct btrfs_root_item *root_item; |
1701 | struct btrfs_path *path; |
1702 | struct extent_buffer *leaf; |
1703 | int reserve_level; |
1704 | int level; |
1705 | int max_level; |
1706 | int replaced = 0; |
1707 | int ret = 0; |
1708 | u32 min_reserved; |
1709 | |
1710 | path = btrfs_alloc_path(); |
1711 | if (!path) |
1712 | return -ENOMEM; |
1713 | path->reada = READA_FORWARD; |
1714 | |
1715 | reloc_root = root->reloc_root; |
1716 | root_item = &reloc_root->root_item; |
1717 | |
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}
1742 | |
1743 | /* |
1744 | * In merge_reloc_root(), we modify the upper level pointer to swap the |
1745 | * tree blocks between reloc tree and subvolume tree. Thus for tree |
1746 | * block COW, we COW at most from level 1 to root level for each tree. |
1747 | * |
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * doubled since we have two trees to COW.
	 */
1751 | reserve_level = max_t(int, 1, btrfs_root_level(root_item)); |
1752 | min_reserved = fs_info->nodesize * reserve_level * 2; |
1753 | memset(&next_key, 0, sizeof(next_key)); |
1754 | |
1755 | while (1) { |
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
1767 | |
1768 | /* |
1769 | * At this point we no longer have a reloc_control, so we can't |
1770 | * depend on btrfs_init_reloc_root to update our last_trans. |
1771 | * |
1772 | * But that's ok, we started the trans handle on our |
1773 | * corresponding fs_root, which means it's been added to the |
1774 | * dirty list. At commit time we'll still call |
1775 | * btrfs_update_reloc_root() and update our root item |
1776 | * appropriately. |
1777 | */ |
1778 | reloc_root->last_trans = trans->transid; |
1779 | trans->block_rsv = rc->block_rsv; |
1780 | |
1781 | replaced = 0; |
1782 | max_level = level; |
1783 | |
		ret = walk_down_reloc_tree(reloc_root, path, &level);
1785 | if (ret < 0) |
1786 | goto out; |
1787 | if (ret > 0) |
1788 | break; |
1789 | |
		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
1796 | } |
1797 | if (ret < 0) |
1798 | goto out; |
1799 | if (ret > 0) { |
1800 | level = ret; |
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
1803 | replaced = 1; |
1804 | } |
1805 | |
		ret = walk_up_reloc_tree(reloc_root, path, &level);
1807 | if (ret > 0) |
1808 | break; |
1809 | |
1810 | BUG_ON(level == 0); |
1811 | /* |
1812 | * save the merging progress in the drop_progress. |
1813 | * this is OK since root refs == 1 in this case. |
1814 | */ |
1815 | btrfs_node_key(eb: path->nodes[level], disk_key: &root_item->drop_progress, |
1816 | nr: path->slots[level]); |
1817 | btrfs_set_root_drop_level(s: root_item, val: level); |
1818 | |
1819 | btrfs_end_transaction_throttle(trans); |
1820 | trans = NULL; |
1821 | |
1822 | btrfs_btree_balance_dirty(fs_info); |
1823 | |
1824 | if (replaced && rc->stage == UPDATE_DATA_PTRS) |
			invalidate_extent_cache(root, &key, &next_key);
1826 | } |
1827 | |
1828 | /* |
1829 | * handle the case only one block in the fs tree need to be |
1830 | * relocated and the block is tree root. |
1831 | */ |
1832 | leaf = btrfs_lock_root_node(root); |
1833 | ret = btrfs_cow_block(trans, root, buf: leaf, NULL, parent_slot: 0, cow_ret: &leaf, |
1834 | nest: BTRFS_NESTING_COW); |
1835 | btrfs_tree_unlock(eb: leaf); |
1836 | free_extent_buffer(eb: leaf); |
1837 | out: |
1838 | btrfs_free_path(p: path); |
1839 | |
1840 | if (ret == 0) { |
1841 | ret = insert_dirty_subvol(trans, rc, root); |
1842 | if (ret) |
1843 | btrfs_abort_transaction(trans, ret); |
1844 | } |
1845 | |
1846 | if (trans) |
1847 | btrfs_end_transaction_throttle(trans); |
1848 | |
1849 | btrfs_btree_balance_dirty(fs_info); |
1850 | |
1851 | if (replaced && rc->stage == UPDATE_DATA_PTRS) |
		invalidate_extent_cache(root, &key, &next_key);
1853 | |
1854 | return ret; |
1855 | } |
1856 | |
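/*
 * Prepare all reloc trees for merging: reserve the merging rsv, set each
 * reloc root's refs to 1 (so btrfs_recover_relocation() knows to resume
 * merging after a crash), update the reloc root items and commit the
 * transaction.
 */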
1857 | static noinline_for_stack |
1858 | int prepare_to_merge(struct reloc_control *rc, int err) |
1859 | { |
1860 | struct btrfs_root *root = rc->extent_root; |
1861 | struct btrfs_fs_info *fs_info = root->fs_info; |
1862 | struct btrfs_root *reloc_root; |
1863 | struct btrfs_trans_handle *trans; |
1864 | LIST_HEAD(reloc_roots); |
1865 | u64 num_bytes = 0; |
1866 | int ret; |
1867 | |
1868 | mutex_lock(&fs_info->reloc_mutex); |
1869 | rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; |
1870 | rc->merging_rsv_size += rc->nodes_relocated * 2; |
	mutex_unlock(&fs_info->reloc_mutex);
1872 | |
1873 | again: |
1874 | if (!err) { |
1875 | num_bytes = rc->merging_rsv_size; |
		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
1878 | if (ret) |
1879 | err = ret; |
1880 | } |
1881 | |
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}
1897 | } |
1898 | |
1899 | rc->merge_reloc_tree = true; |
1900 | |
	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (IS_ERR(root)) {
			/*
			 * Even if we have an error we need this reloc root
			 * back on our list so we can clean up properly.
			 */
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
			if (!err)
				err = PTR_ERR(root);
1917 | break; |
1918 | } |
1919 | |
1920 | if (unlikely(root->reloc_root != reloc_root)) { |
1921 | if (root->reloc_root) { |
				btrfs_err(fs_info,
"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
1924 | root->root_key.objectid, |
1925 | root->reloc_root->root_key.objectid, |
1926 | root->reloc_root->root_key.type, |
1927 | root->reloc_root->root_key.offset, |
1928 | btrfs_root_generation( |
1929 | &root->reloc_root->root_item), |
1930 | reloc_root->root_key.objectid, |
1931 | reloc_root->root_key.type, |
1932 | reloc_root->root_key.offset, |
1933 | btrfs_root_generation( |
1934 | &reloc_root->root_item)); |
1935 | } else { |
				btrfs_err(fs_info,
"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
1938 | root->root_key.objectid, |
1939 | reloc_root->root_key.objectid, |
1940 | reloc_root->root_key.type, |
1941 | reloc_root->root_key.offset, |
1942 | btrfs_root_generation( |
1943 | &reloc_root->root_item)); |
1944 | } |
			list_add(&reloc_root->root_list, &reloc_roots);
1946 | btrfs_put_root(root); |
1947 | btrfs_abort_transaction(trans, -EUCLEAN); |
1948 | if (!err) |
1949 | err = -EUCLEAN; |
1950 | break; |
1951 | } |
1952 | |
1953 | /* |
1954 | * set reference count to 1, so btrfs_recover_relocation |
1955 | * knows it should resumes merging |
1956 | */ |
1957 | if (!err) |
1958 | btrfs_set_root_refs(s: &reloc_root->root_item, val: 1); |
1959 | ret = btrfs_update_reloc_root(trans, root); |
1960 | |
1961 | /* |
1962 | * Even if we have an error we need this reloc root back on our |
1963 | * list so we can clean up properly. |
1964 | */ |
		list_add(&reloc_root->root_list, &reloc_roots);
1966 | btrfs_put_root(root); |
1967 | |
1968 | if (ret) { |
1969 | btrfs_abort_transaction(trans, ret); |
1970 | if (!err) |
1971 | err = ret; |
1972 | break; |
1973 | } |
1974 | } |
1975 | |
	list_splice(&reloc_roots, &rc->reloc_roots);
1977 | |
1978 | if (!err) |
1979 | err = btrfs_commit_transaction(trans); |
1980 | else |
1981 | btrfs_end_transaction(trans); |
1982 | return err; |
1983 | } |
1984 | |
1985 | static noinline_for_stack |
1986 | void free_reloc_roots(struct list_head *list) |
1987 | { |
1988 | struct btrfs_root *reloc_root, *tmp; |
1989 | |
1990 | list_for_each_entry_safe(reloc_root, tmp, list, root_list) |
		__del_reloc_root(reloc_root);
1992 | } |
1993 | |
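/*
 * Merge all reloc trees into their fs trees.  Reloc roots that still have
 * root refs are merged via merge_reloc_root(); roots whose refs have
 * dropped to zero were already merged and are only queued for cleanup.
 */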
1994 | static noinline_for_stack |
1995 | void merge_reloc_roots(struct reloc_control *rc) |
1996 | { |
1997 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
1998 | struct btrfs_root *root; |
1999 | struct btrfs_root *reloc_root; |
2000 | LIST_HEAD(reloc_roots); |
2001 | int found = 0; |
2002 | int ret = 0; |
2003 | again: |
2004 | root = rc->extent_root; |
2005 | |
2006 | /* |
2007 | * this serializes us with btrfs_record_root_in_transaction, |
2008 | * we have to make sure nobody is in the middle of |
2009 | * adding their roots to the list while we are |
2010 | * doing this splice |
2011 | */ |
2012 | mutex_lock(&fs_info->reloc_mutex); |
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);
2015 | |
	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2024 | if (WARN_ON(IS_ERR(root))) { |
2025 | /* |
2026 | * For recovery we read the fs roots on mount, |
2027 | * and if we didn't find the root then we marked |
2028 | * the reloc root as a garbage root. For normal |
2029 | * relocation obviously the root should exist in |
2030 | * memory. However there's no reason we can't |
2031 | * handle the error properly here just in case. |
2032 | */ |
				ret = PTR_ERR(root);
2034 | goto out; |
2035 | } |
2036 | if (WARN_ON(root->reloc_root != reloc_root)) { |
2037 | /* |
2038 | * This can happen if on-disk metadata has some |
2039 | * corruption, e.g. bad reloc tree key offset. |
2040 | */ |
2041 | ret = -EINVAL; |
2042 | goto out; |
2043 | } |
2044 | ret = merge_reloc_root(rc, root); |
2045 | btrfs_put_root(root); |
2046 | if (ret) { |
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
2050 | goto out; |
2051 | } |
2052 | } else { |
			if (!IS_ERR(root)) {
				if (root->reloc_root == reloc_root) {
					root->reloc_root = NULL;
					btrfs_put_root(reloc_root);
				}
				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
					  &root->state);
				btrfs_put_root(root);
			}

			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
2067 | } |
2068 | } |
2069 | |
2070 | if (found) { |
2071 | found = 0; |
2072 | goto again; |
2073 | } |
2074 | out: |
2075 | if (ret) { |
2076 | btrfs_handle_fs_error(fs_info, ret, NULL); |
		free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		free_reloc_roots(&reloc_roots);
2084 | } |
2085 | |
2086 | /* |
2087 | * We used to have |
2088 | * |
2089 | * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); |
2090 | * |
2091 | * here, but it's wrong. If we fail to start the transaction in |
2092 | * prepare_to_merge() we will have only 0 ref reloc roots, none of which |
2093 | * have actually been removed from the reloc_root_tree rb tree. This is |
2094 | * fine because we're bailing here, and we hold a reference on the root |
2095 | * for the list that holds it, so these roots will be cleaned up when we |
2096 | * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root |
2097 | * will be cleaned up on unmount. |
2098 | * |
2099 | * The remaining nodes will be cleaned up by free_reloc_control. |
2100 | */ |
2101 | } |
2102 | |
2103 | static void free_block_list(struct rb_root *blocks) |
2104 | { |
2105 | struct tree_block *block; |
2106 | struct rb_node *rb_node; |

	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
2111 | } |
2112 | } |
2113 | |
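/*
 * Record the fs root that owns @reloc_root in the current transaction, so
 * the reloc root item gets updated at commit time.
 */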
2114 | static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, |
2115 | struct btrfs_root *reloc_root) |
2116 | { |
2117 | struct btrfs_fs_info *fs_info = reloc_root->fs_info; |
2118 | struct btrfs_root *root; |
2119 | int ret; |
2120 | |
2121 | if (reloc_root->last_trans == trans->transid) |
2122 | return 0; |
2123 | |
	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2125 | |
2126 | /* |
2127 | * This should succeed, since we can't have a reloc root without having |
2128 | * already looked up the actual root and created the reloc root for this |
2129 | * root. |
2130 | * |
2131 | * However if there's some sort of corruption where we have a ref to a |
2132 | * reloc root without a corresponding root this could return ENOENT. |
2133 | */ |
	if (IS_ERR(root)) {
		ASSERT(0);
		return PTR_ERR(root);
2137 | } |
2138 | if (root->reloc_root != reloc_root) { |
2139 | ASSERT(0); |
		btrfs_err(fs_info,
			  "root %llu has two reloc roots associated with it",
			  reloc_root->root_key.offset);
2143 | btrfs_put_root(root); |
2144 | return -EUCLEAN; |
2145 | } |
2146 | ret = btrfs_record_root_in_trans(trans, root); |
2147 | btrfs_put_root(root); |
2148 | |
2149 | return ret; |
2150 | } |
2151 | |
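/*
 * Walk up the backref cache from @node to the tree root that owns the
 * block, record the visited roots in the transaction, and return the
 * reloc root to COW from.  Also fills rc->backref_cache.path for the
 * benefit of btrfs_reloc_cow_block().
 */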
2152 | static noinline_for_stack |
2153 | struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, |
2154 | struct reloc_control *rc, |
2155 | struct btrfs_backref_node *node, |
2156 | struct btrfs_backref_edge *edges[]) |
2157 | { |
2158 | struct btrfs_backref_node *next; |
2159 | struct btrfs_root *root; |
2160 | int index = 0; |
2161 | int ret; |
2162 | |
2163 | next = node; |
2164 | while (1) { |
2165 | cond_resched(); |
		next = walk_up_backref(next, edges, &index);
2167 | root = next->root; |
2168 | |
2169 | /* |
2170 | * If there is no root, then our references for this block are |
2171 | * incomplete, as we should be able to walk all the way up to a |
2172 | * block that is owned by a root. |
2173 | * |
2174 | * This path is only for SHAREABLE roots, so if we come upon a |
2175 | * non-SHAREABLE root then we have backrefs that resolve |
2176 | * improperly. |
2177 | * |
2178 | * Both of these cases indicate file system corruption, or a bug |
2179 | * in the backref walking code. |
2180 | */ |
2181 | if (!root) { |
2182 | ASSERT(0); |
			btrfs_err(trans->fs_info,
	"bytenr %llu doesn't have a backref path ending in a root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
2187 | } |
2188 | if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { |
2189 | ASSERT(0); |
			btrfs_err(trans->fs_info,
	"bytenr %llu has multiple refs with one ending in a non-shareable root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
2194 | } |
2195 | |
2196 | if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { |
			ret = record_reloc_root_in_trans(trans, root);
			if (ret)
				return ERR_PTR(ret);
2200 | break; |
2201 | } |
2202 | |
2203 | ret = btrfs_record_root_in_trans(trans, root); |
2204 | if (ret) |
			return ERR_PTR(ret);
		root = root->reloc_root;

		/*
		 * We could have raced with another thread which failed, so
		 * root->reloc_root may not be set, return ENOENT in this case.
		 */
		if (!root)
			return ERR_PTR(-ENOENT);
2214 | |
2215 | if (next->new_bytenr != root->node->start) { |
2216 | /* |
2217 | * We just created the reloc root, so we shouldn't have |
2218 | * ->new_bytenr set and this shouldn't be in the changed |
2219 | * list. If it is then we have multiple roots pointing |
2220 | * at the same bytenr which indicates corruption, or |
2221 | * we've made a mistake in the backref walking code. |
2222 | */ |
2223 | ASSERT(next->new_bytenr == 0); |
2224 | ASSERT(list_empty(&next->list)); |
			if (next->new_bytenr || !list_empty(&next->list)) {
				btrfs_err(trans->fs_info,
	"bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
					  node->bytenr, next->bytenr);
				return ERR_PTR(-EUCLEAN);
2230 | } |
2231 | |
2232 | next->new_bytenr = root->node->start; |
			btrfs_put_root(next->root);
			next->root = btrfs_grab_root(root);
			ASSERT(next->root);
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			mark_block_processed(rc, next);
2239 | break; |
2240 | } |
2241 | |
2242 | WARN_ON(1); |
2243 | root = NULL; |
		next = walk_down_backref(edges, &index);
2245 | if (!next || next->level <= node->level) |
2246 | break; |
2247 | } |
2248 | if (!root) { |
2249 | /* |
2250 | * This can happen if there's fs corruption or if there's a bug |
2251 | * in the backref lookup code. |
2252 | */ |
2253 | ASSERT(0); |
		return ERR_PTR(-ENOENT);
2255 | } |
2256 | |
2257 | next = node; |
2258 | /* setup backref node path for btrfs_reloc_cow_block */ |
2259 | while (1) { |
2260 | rc->backref_cache.path[next->level] = next; |
2261 | if (--index < 0) |
2262 | break; |
2263 | next = edges[index]->node[UPPER]; |
2264 | } |
2265 | return root; |
2266 | } |
2267 | |
2268 | /* |
2269 | * Select a tree root for relocation. |
2270 | * |
2271 | * Return NULL if the block is not shareable. We should use do_relocation() in |
2272 | * this case. |
2273 | * |
2274 | * Return a tree root pointer if the block is shareable. |
2275 | * Return -ENOENT if the block is root of reloc tree. |
2276 | */ |
2277 | static noinline_for_stack |
2278 | struct btrfs_root *select_one_root(struct btrfs_backref_node *node) |
2279 | { |
2280 | struct btrfs_backref_node *next; |
2281 | struct btrfs_root *root; |
2282 | struct btrfs_root *fs_root = NULL; |
2283 | struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; |
2284 | int index = 0; |
2285 | |
2286 | next = node; |
2287 | while (1) { |
2288 | cond_resched(); |
		next = walk_up_backref(next, edges, &index);
2290 | root = next->root; |
2291 | |
2292 | /* |
2293 | * This can occur if we have incomplete extent refs leading all |
2294 | * the way up a particular path, in this case return -EUCLEAN. |
2295 | */ |
2296 | if (!root) |
			return ERR_PTR(-EUCLEAN);
2298 | |
2299 | /* No other choice for non-shareable tree */ |
2300 | if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) |
2301 | return root; |
2302 | |
2303 | if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) |
2304 | fs_root = root; |
2305 | |
2306 | if (next != node) |
2307 | return NULL; |
2308 | |
		next = walk_down_backref(edges, &index);
2310 | if (!next || next->level <= node->level) |
2311 | break; |
2312 | } |
2313 | |
2314 | if (!fs_root) |
		return ERR_PTR(-ENOENT);
2316 | return fs_root; |
2317 | } |
2318 | |
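/*
 * Count the metadata that may be dirtied when relocating @node: one
 * nodesize for the block itself plus one for every not yet processed
 * block above it in the backref cache.
 */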
2319 | static noinline_for_stack |
2320 | u64 calcu_metadata_size(struct reloc_control *rc, |
2321 | struct btrfs_backref_node *node, int reserve) |
2322 | { |
2323 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
2324 | struct btrfs_backref_node *next = node; |
2325 | struct btrfs_backref_edge *edge; |
2326 | struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; |
2327 | u64 num_bytes = 0; |
2328 | int index = 0; |
2329 | |
2330 | BUG_ON(reserve && node->processed); |
2331 | |
2332 | while (next) { |
2333 | cond_resched(); |
2334 | while (1) { |
2335 | if (next->processed && (reserve || next != node)) |
2336 | break; |
2337 | |
2338 | num_bytes += fs_info->nodesize; |
2339 | |
			if (list_empty(&next->upper))
2341 | break; |
2342 | |
2343 | edge = list_entry(next->upper.next, |
2344 | struct btrfs_backref_edge, list[LOWER]); |
2345 | edges[index++] = edge; |
2346 | next = edge->node[UPPER]; |
2347 | } |
		next = walk_down_backref(edges, &index);
2349 | } |
2350 | return num_bytes; |
2351 | } |
2352 | |
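/*
 * Reserve metadata space for relocating @node.  The estimate is doubled
 * since both the reloc tree and the destination tree may be COWed.
 */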
2353 | static int reserve_metadata_space(struct btrfs_trans_handle *trans, |
2354 | struct reloc_control *rc, |
2355 | struct btrfs_backref_node *node) |
2356 | { |
2357 | struct btrfs_root *root = rc->extent_root; |
2358 | struct btrfs_fs_info *fs_info = root->fs_info; |
2359 | u64 num_bytes; |
2360 | int ret; |
2361 | u64 tmp; |
2362 | |
	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2364 | |
2365 | trans->block_rsv = rc->block_rsv; |
2366 | rc->reserved_bytes += num_bytes; |
2367 | |
2368 | /* |
2369 | * We are under a transaction here so we can only do limited flushing. |
2370 | * If we get an enospc just kick back -EAGAIN so we know to drop the |
2371 | * transaction and try to refill when we can flush all the things. |
2372 | */ |
	ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
2375 | if (ret) { |
2376 | tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; |
2377 | while (tmp <= rc->reserved_bytes) |
2378 | tmp <<= 1; |
2379 | /* |
2380 | * only one thread can access block_rsv at this point, |
2381 | * so we don't need hold lock to protect block_rsv. |
2382 | * we expand more reservation size here to allow enough |
2383 | * space for relocation and we will return earlier in |
2384 | * enospc case. |
2385 | */ |
2386 | rc->block_rsv->size = tmp + fs_info->nodesize * |
2387 | RELOCATION_RESERVED_NODES; |
2388 | return -EAGAIN; |
2389 | } |
2390 | |
2391 | return 0; |
2392 | } |
2393 | |
2394 | /* |
2395 | * relocate a block tree, and then update pointers in upper level |
2396 | * blocks that reference the block to point to the new location. |
2397 | * |
2398 | * if called by link_to_upper, the block has already been relocated. |
2399 | * in that case this function just updates pointers. |
2400 | */ |
2401 | static int do_relocation(struct btrfs_trans_handle *trans, |
2402 | struct reloc_control *rc, |
2403 | struct btrfs_backref_node *node, |
2404 | struct btrfs_key *key, |
2405 | struct btrfs_path *path, int lowest) |
2406 | { |
2407 | struct btrfs_backref_node *upper; |
2408 | struct btrfs_backref_edge *edge; |
2409 | struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; |
2410 | struct btrfs_root *root; |
2411 | struct extent_buffer *eb; |
2412 | u32 blocksize; |
2413 | u64 bytenr; |
2414 | int slot; |
2415 | int ret = 0; |
2416 | |
2417 | /* |
2418 | * If we are lowest then this is the first time we're processing this |
2419 | * block, and thus shouldn't have an eb associated with it yet. |
2420 | */ |
2421 | ASSERT(!lowest || !node->eb); |
2422 | |
2423 | path->lowest_level = node->level + 1; |
2424 | rc->backref_cache.path[node->level] = node; |
2425 | list_for_each_entry(edge, &node->upper, list[LOWER]) { |
2426 | struct btrfs_ref ref = { 0 }; |
2427 | |
2428 | cond_resched(); |
2429 | |
2430 | upper = edge->node[UPPER]; |
		root = select_reloc_root(trans, rc, upper, edges);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto next;
2435 | } |
2436 | |
2437 | if (upper->eb && !upper->locked) { |
2438 | if (!lowest) { |
				ret = btrfs_bin_search(upper->eb, 0, key, &slot);
				if (ret < 0)
					goto next;
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			btrfs_backref_drop_node_buffer(upper);
2448 | } |
2449 | |
2450 | if (!upper->eb) { |
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret > 0)
					ret = -ENOENT;

				btrfs_release_path(path);
2457 | break; |
2458 | } |
2459 | |
2460 | if (!upper->eb) { |
2461 | upper->eb = path->nodes[upper->level]; |
2462 | path->nodes[upper->level] = NULL; |
2463 | } else { |
2464 | BUG_ON(upper->eb != path->nodes[upper->level]); |
2465 | } |
2466 | |
2467 | upper->locked = 1; |
2468 | path->locks[upper->level] = 0; |
2469 | |
2470 | slot = path->slots[upper->level]; |
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, 0, key, &slot);
			if (ret < 0)
				goto next;
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
2480 | if (lowest) { |
2481 | if (bytenr != node->bytenr) { |
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2484 | bytenr, node->bytenr, slot, |
2485 | upper->eb->start); |
2486 | ret = -EIO; |
2487 | goto next; |
2488 | } |
2489 | } else { |
2490 | if (node->eb->start == bytenr) |
2491 | goto next; |
2492 | } |
2493 | |
2494 | blocksize = root->fs_info->nodesize; |
		eb = btrfs_read_node_slot(upper->eb, slot);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
2498 | goto next; |
2499 | } |
2500 | btrfs_tree_lock(eb); |
2501 | |
2502 | if (!node->eb) { |
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb, BTRFS_NESTING_COW);
2505 | btrfs_tree_unlock(eb); |
2506 | free_extent_buffer(eb); |
2507 | if (ret < 0) |
2508 | goto next; |
2509 | /* |
2510 | * We've just COWed this block, it should have updated |
2511 | * the correct backref node entry. |
2512 | */ |
2513 | ASSERT(node->eb == eb); |
2514 | } else { |
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(trans, upper->eb);

			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
					       node->eb->start, blocksize,
					       upper->eb->start,
					       btrfs_header_owner(upper->eb));
			btrfs_init_tree_ref(&ref, node->level,
					    btrfs_header_owner(upper->eb),
					    root->root_key.objectid, false);
			ret = btrfs_inc_extent_ref(trans, &ref);
			if (!ret)
				ret = btrfs_drop_subtree(trans, root, eb,
							 upper->eb);
2532 | if (ret) |
2533 | btrfs_abort_transaction(trans, ret); |
2534 | } |
2535 | next: |
2536 | if (!upper->pending) |
			btrfs_backref_drop_node_buffer(upper);
		else
			btrfs_backref_unlock_node_buffer(upper);
2540 | if (ret) |
2541 | break; |
2542 | } |
2543 | |
2544 | if (!ret && node->pending) { |
2545 | btrfs_backref_drop_node_buffer(node); |
		list_move_tail(&node->list, &rc->backref_cache.changed);
2547 | node->pending = 0; |
2548 | } |
2549 | |
2550 | path->lowest_level = 0; |
2551 | |
2552 | /* |
2553 | * We should have allocated all of our space in the block rsv and thus |
2554 | * shouldn't ENOSPC. |
2555 | */ |
2556 | ASSERT(ret != -ENOSPC); |
2557 | return ret; |
2558 | } |
2559 | |
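/*
 * Update the pointers in the upper level blocks to point at @node's new
 * location.  The block itself has already been relocated.
 */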
2560 | static int link_to_upper(struct btrfs_trans_handle *trans, |
2561 | struct reloc_control *rc, |
2562 | struct btrfs_backref_node *node, |
2563 | struct btrfs_path *path) |
2564 | { |
2565 | struct btrfs_key key; |
2566 | |
	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
2569 | } |
2570 | |
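/*
 * Link every backref node parked on the pending lists to its upper level
 * blocks, then put the nodes back on the pending lists.  On error the
 * remaining nodes are still drained so the lists stay consistent.
 */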
2571 | static int finish_pending_nodes(struct btrfs_trans_handle *trans, |
2572 | struct reloc_control *rc, |
2573 | struct btrfs_path *path, int err) |
2574 | { |
2575 | LIST_HEAD(list); |
2576 | struct btrfs_backref_cache *cache = &rc->backref_cache; |
2577 | struct btrfs_backref_node *node; |
2578 | int level; |
2579 | int ret; |
2580 | |
2581 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { |
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct btrfs_backref_node, list);
			list_move_tail(&node->list, &list);
2586 | BUG_ON(!node->pending); |
2587 | |
2588 | if (!err) { |
2589 | ret = link_to_upper(trans, rc, node, path); |
2590 | if (ret < 0) |
2591 | err = ret; |
2592 | } |
2593 | } |
		list_splice_init(&list, &cache->pending[level]);
2595 | } |
2596 | return err; |
2597 | } |
2598 | |
2599 | /* |
2600 | * mark a block and all blocks directly/indirectly reference the block |
2601 | * as processed. |
2602 | */ |
2603 | static void update_processed_blocks(struct reloc_control *rc, |
2604 | struct btrfs_backref_node *node) |
2605 | { |
2606 | struct btrfs_backref_node *next = node; |
2607 | struct btrfs_backref_edge *edge; |
2608 | struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; |
2609 | int index = 0; |
2610 | |
2611 | while (next) { |
2612 | cond_resched(); |
2613 | while (1) { |
2614 | if (next->processed) |
2615 | break; |
2616 | |
			mark_block_processed(rc, next);

			if (list_empty(&next->upper))
2620 | break; |
2621 | |
2622 | edge = list_entry(next->upper.next, |
2623 | struct btrfs_backref_edge, list[LOWER]); |
2624 | edges[index++] = edge; |
2625 | next = edge->node[UPPER]; |
2626 | } |
		next = walk_down_backref(edges, &index);
2628 | } |
2629 | } |
2630 | |
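/*
 * Return 1 if the tree block at @bytenr has already been relocated, i.e.
 * its range is marked EXTENT_DIRTY in rc->processed_blocks.
 */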
2631 | static int tree_block_processed(u64 bytenr, struct reloc_control *rc) |
2632 | { |
2633 | u32 blocksize = rc->extent_root->fs_info->nodesize; |
2634 | |
	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
2637 | return 1; |
2638 | return 0; |
2639 | } |
2640 | |
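/*
 * Read the tree block described by @block (verified against the level,
 * owner and generation recorded when the block was found) and store its
 * first key in block->key.
 */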
2641 | static int get_tree_block_key(struct btrfs_fs_info *fs_info, |
2642 | struct tree_block *block) |
2643 | { |
2644 | struct btrfs_tree_parent_check check = { |
2645 | .level = block->level, |
2646 | .owner_root = block->owner, |
2647 | .transid = block->key.offset |
2648 | }; |
2649 | struct extent_buffer *eb; |
2650 | |
	eb = read_tree_block(fs_info, block->bytenr, &check);
	if (IS_ERR(eb))
		return PTR_ERR(eb);
2654 | if (!extent_buffer_uptodate(eb)) { |
2655 | free_extent_buffer(eb); |
2656 | return -EIO; |
2657 | } |
2658 | if (block->level == 0) |
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
2662 | free_extent_buffer(eb); |
2663 | block->key_ready = true; |
2664 | return 0; |
2665 | } |
2666 | |
2667 | /* |
2668 | * helper function to relocate a tree block |
2669 | */ |
2670 | static int relocate_tree_block(struct btrfs_trans_handle *trans, |
2671 | struct reloc_control *rc, |
2672 | struct btrfs_backref_node *node, |
2673 | struct btrfs_key *key, |
2674 | struct btrfs_path *path) |
2675 | { |
2676 | struct btrfs_root *root; |
2677 | int ret = 0; |
2678 | |
2679 | if (!node) |
2680 | return 0; |
2681 | |
2682 | /* |
2683 | * If we fail here we want to drop our backref_node because we are going |
2684 | * to start over and regenerate the tree for it. |
2685 | */ |
2686 | ret = reserve_metadata_space(trans, rc, node); |
2687 | if (ret) |
2688 | goto out; |
2689 | |
2690 | BUG_ON(node->processed); |
2691 | root = select_one_root(node); |
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
2694 | |
2695 | /* See explanation in select_one_root for the -EUCLEAN case. */ |
2696 | ASSERT(ret == -ENOENT); |
2697 | if (ret == -ENOENT) { |
2698 | ret = 0; |
2699 | update_processed_blocks(rc, node); |
2700 | } |
2701 | goto out; |
2702 | } |
2703 | |
2704 | if (root) { |
2705 | if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { |
2706 | /* |
2707 | * This block was the root block of a root, and this is |
2708 | * the first time we're processing the block and thus it |
2709 | * should not have had the ->new_bytenr modified and |
2710 | * should have not been included on the changed list. |
2711 | * |
2712 | * However in the case of corruption we could have |
2713 | * multiple refs pointing to the same block improperly, |
2714 | * and thus we would trip over these checks. ASSERT() |
2715 | * for the developer case, because it could indicate a |
2716 | * bug in the backref code, however error out for a |
2717 | * normal user in the case of corruption. |
2718 | */ |
2719 | ASSERT(node->new_bytenr == 0); |
2720 | ASSERT(list_empty(&node->list)); |
			if (node->new_bytenr || !list_empty(&node->list)) {
				btrfs_err(root->fs_info,
				  "bytenr %llu has improper references to it",
					  node->bytenr);
2725 | ret = -EUCLEAN; |
2726 | goto out; |
2727 | } |
2728 | ret = btrfs_record_root_in_trans(trans, root); |
2729 | if (ret) |
2730 | goto out; |
2731 | /* |
2732 | * Another thread could have failed, need to check if we |
2733 | * have reloc_root actually set. |
2734 | */ |
2735 | if (!root->reloc_root) { |
2736 | ret = -ENOENT; |
2737 | goto out; |
2738 | } |
2739 | root = root->reloc_root; |
2740 | node->new_bytenr = root->node->start; |
			btrfs_put_root(node->root);
			node->root = btrfs_grab_root(root);
			ASSERT(node->root);
			list_add_tail(&node->list, &rc->backref_cache.changed);
2745 | } else { |
2746 | path->lowest_level = node->level; |
2747 | if (root == root->fs_info->chunk_root) |
				btrfs_reserve_chunk_metadata(trans, false);
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
2751 | if (root == root->fs_info->chunk_root) |
2752 | btrfs_trans_release_chunk_metadata(trans); |
2753 | if (ret > 0) |
2754 | ret = 0; |
2755 | } |
2756 | if (!ret) |
2757 | update_processed_blocks(rc, node); |
2758 | } else { |
		ret = do_relocation(trans, rc, node, key, path, 1);
2760 | } |
2761 | out: |
2762 | if (ret || node->level == 0 || node->cowonly) |
		btrfs_backref_cleanup_node(&rc->backref_cache, node);
2764 | return ret; |
2765 | } |
2766 | |
2767 | /* |
2768 | * relocate a list of blocks |
2769 | */ |
2770 | static noinline_for_stack |
2771 | int relocate_tree_blocks(struct btrfs_trans_handle *trans, |
2772 | struct reloc_control *rc, struct rb_root *blocks) |
2773 | { |
2774 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
2775 | struct btrfs_backref_node *node; |
2776 | struct btrfs_path *path; |
2777 | struct tree_block *block; |
2778 | struct tree_block *next; |
2779 | int ret; |
2780 | int err = 0; |
2781 | |
2782 | path = btrfs_alloc_path(); |
2783 | if (!path) { |
2784 | err = -ENOMEM; |
2785 | goto out_free_blocks; |
2786 | } |
2787 | |
2788 | /* Kick in readahead for tree blocks with missing keys */ |
2789 | rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { |
2790 | if (!block->key_ready) |
			btrfs_readahead_tree_block(fs_info, block->bytenr,
						   block->owner, 0,
						   block->level);
2794 | } |
2795 | |
2796 | /* Get first keys */ |
2797 | rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { |
2798 | if (!block->key_ready) { |
2799 | err = get_tree_block_key(fs_info, block); |
2800 | if (err) |
2801 | goto out_free_path; |
2802 | } |
2803 | } |
2804 | |
2805 | /* Do tree relocation */ |
2806 | rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { |
		node = build_backref_tree(trans, rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
2816 | if (ret < 0) { |
2817 | err = ret; |
2818 | break; |
2819 | } |
2820 | } |
2821 | out: |
2822 | err = finish_pending_nodes(trans, rc, path, err); |
2823 | |
2824 | out_free_path: |
	btrfs_free_path(path);
2826 | out_free_blocks: |
2827 | free_block_list(blocks); |
2828 | return err; |
2829 | } |
2830 | |
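/*
 * Preallocate file extents in the data reloc inode covering @cluster, one
 * range per cluster boundary, so the relocated data is written into
 * extents of the same size as the source extents.
 */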
2831 | static noinline_for_stack int prealloc_file_extent_cluster( |
2832 | struct btrfs_inode *inode, |
2833 | const struct file_extent_cluster *cluster) |
2834 | { |
2835 | u64 alloc_hint = 0; |
2836 | u64 start; |
2837 | u64 end; |
2838 | u64 offset = inode->index_cnt; |
2839 | u64 num_bytes; |
2840 | int nr; |
2841 | int ret = 0; |
	u64 i_size = i_size_read(&inode->vfs_inode);
2843 | u64 prealloc_start = cluster->start - offset; |
2844 | u64 prealloc_end = cluster->end - offset; |
2845 | u64 cur_offset = prealloc_start; |
2846 | |
2847 | /* |
2848 | * For subpage case, previous i_size may not be aligned to PAGE_SIZE. |
2849 | * This means the range [i_size, PAGE_END + 1) is filled with zeros by |
2850 | * btrfs_do_readpage() call of previously relocated file cluster. |
2851 | * |
2852 | * If the current cluster starts in the above range, btrfs_do_readpage() |
2853 | * will skip the read, and relocate_one_page() will later writeback |
2854 | * the padding zeros as new data, causing data corruption. |
2855 | * |
2856 | * Here we have to manually invalidate the range (i_size, PAGE_END + 1). |
2857 | */ |
2858 | if (!PAGE_ALIGNED(i_size)) { |
2859 | struct address_space *mapping = inode->vfs_inode.i_mapping; |
2860 | struct btrfs_fs_info *fs_info = inode->root->fs_info; |
2861 | const u32 sectorsize = fs_info->sectorsize; |
2862 | struct page *page; |
2863 | |
2864 | ASSERT(sectorsize < PAGE_SIZE); |
2865 | ASSERT(IS_ALIGNED(i_size, sectorsize)); |
2866 | |
2867 | /* |
2868 | * Subpage can't handle page with DIRTY but without UPTODATE |
2869 | * bit as it can lead to the following deadlock: |
2870 | * |
2871 | * btrfs_read_folio() |
2872 | * | Page already *locked* |
2873 | * |- btrfs_lock_and_flush_ordered_range() |
2874 | * |- btrfs_start_ordered_extent() |
2875 | * |- extent_write_cache_pages() |
2876 | * |- lock_page() |
2877 | * We try to lock the page we already hold. |
2878 | * |
2879 | * Here we just writeback the whole data reloc inode, so that |
2880 | * we will be ensured to have no dirty range in the page, and |
2881 | * are safe to clear the uptodate bits. |
2882 | * |
2883 | * This shouldn't cause too much overhead, as we need to write |
2884 | * the data back anyway. |
2885 | */ |
2886 | ret = filemap_write_and_wait(mapping); |
2887 | if (ret < 0) |
2888 | return ret; |
2889 | |
		clear_extent_bits(&inode->io_tree, i_size,
				  round_up(i_size, PAGE_SIZE) - 1,
				  EXTENT_UPTODATE);
		page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
2894 | /* |
2895 | * If page is freed we don't need to do anything then, as we |
2896 | * will re-read the whole page anyway. |
2897 | */ |
2898 | if (page) { |
			btrfs_subpage_clear_uptodate(fs_info, page_folio(page), i_size,
						     round_up(i_size, PAGE_SIZE) - i_size);
2901 | unlock_page(page); |
2902 | put_page(page); |
2903 | } |
2904 | } |
2905 | |
2906 | BUG_ON(cluster->start != cluster->boundary[0]); |
	ret = btrfs_alloc_data_chunk_ondemand(inode,
					      prealloc_end + 1 - prealloc_start);
	if (ret)
		return ret;

	btrfs_inode_lock(inode, 0);
2913 | for (nr = 0; nr < cluster->nr; nr++) { |
2914 | struct extent_state *cached_state = NULL; |
2915 | |
2916 | start = cluster->boundary[nr] - offset; |
2917 | if (nr + 1 < cluster->nr) |
2918 | end = cluster->boundary[nr + 1] - 1 - offset; |
2919 | else |
2920 | end = cluster->end - offset; |
2921 | |
		lock_extent(&inode->io_tree, start, end, &cached_state);
		num_bytes = end + 1 - start;
		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&inode->io_tree, start, end, &cached_state);
2929 | if (ret) |
2930 | break; |
2931 | } |
	btrfs_inode_unlock(inode, 0);
2933 | |
2934 | if (cur_offset < prealloc_end) |
		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
						       prealloc_end + 1 - cur_offset);
2937 | return ret; |
2938 | } |
2939 | |
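/*
 * Insert a pinned extent map so the file range [start, end] of the data
 * reloc inode maps directly to the old data extent at @block_start, making
 * reads pull in the data that is being relocated.
 */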
2940 | static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode, |
2941 | u64 start, u64 end, u64 block_start) |
2942 | { |
2943 | struct extent_map *em; |
2944 | struct extent_state *cached_state = NULL; |
2945 | int ret = 0; |
2946 | |
2947 | em = alloc_extent_map(); |
2948 | if (!em) |
2949 | return -ENOMEM; |
2950 | |
2951 | em->start = start; |
2952 | em->len = end + 1 - start; |
2953 | em->block_len = em->len; |
2954 | em->block_start = block_start; |
2955 | em->flags |= EXTENT_FLAG_PINNED; |
2956 | |
	lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
	ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2960 | free_extent_map(em); |
2961 | |
2962 | return ret; |
2963 | } |
2964 | |
2965 | /* |
2966 | * Allow error injection to test balance/relocation cancellation |
2967 | */ |
2968 | noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info) |
2969 | { |
	return atomic_read(&fs_info->balance_cancel_req) ||
		atomic_read(&fs_info->reloc_cancel_req) ||
2972 | fatal_signal_pending(current); |
2973 | } |
2974 | ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE); |
2975 | |
2976 | static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster, |
2977 | int cluster_nr) |
2978 | { |
2979 | /* Last extent, use cluster end directly */ |
2980 | if (cluster_nr >= cluster->nr - 1) |
2981 | return cluster->end; |
2982 | |
	/* Use next boundary start */
2984 | return cluster->boundary[cluster_nr + 1] - 1; |
2985 | } |
2986 | |
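/*
 * Relocate the data covered by one page of the data reloc inode: read the
 * old data into the page cache, mark each extent range in the page
 * delalloc and dirty, and set EXTENT_BOUNDARY at every cluster boundary so
 * writeback allocates extents with the same layout as the source.
 */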
2987 | static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, |
2988 | const struct file_extent_cluster *cluster, |
2989 | int *cluster_nr, unsigned long page_index) |
2990 | { |
2991 | struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); |
2992 | u64 offset = BTRFS_I(inode)->index_cnt; |
2993 | const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; |
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2995 | struct page *page; |
2996 | u64 page_start; |
2997 | u64 page_end; |
2998 | u64 cur; |
2999 | int ret; |
3000 | |
3001 | ASSERT(page_index <= last_index); |
	page = find_lock_page(inode->i_mapping, page_index);
	if (!page) {
		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
					  page_index, last_index + 1 - page_index);
		page = find_or_create_page(inode->i_mapping, page_index, mask);
3007 | if (!page) |
3008 | return -ENOMEM; |
3009 | } |
3010 | |
3011 | if (PageReadahead(page)) |
		page_cache_async_readahead(inode->i_mapping, ra, NULL,
					   page_folio(page), page_index,
					   last_index + 1 - page_index);
3015 | |
3016 | if (!PageUptodate(page)) { |
3017 | btrfs_read_folio(NULL, page_folio(page)); |
3018 | lock_page(page); |
3019 | if (!PageUptodate(page)) { |
3020 | ret = -EIO; |
3021 | goto release_page; |
3022 | } |
3023 | } |
3024 | |
3025 | /* |
3026 | * We could have lost page private when we dropped the lock to read the |
3027 | * page above, make sure we set_page_extent_mapped here so we have any |
3028 | * of the subpage blocksize stuff we need in place. |
3029 | */ |
3030 | ret = set_page_extent_mapped(page); |
3031 | if (ret < 0) |
3032 | goto release_page; |
3033 | |
3034 | page_start = page_offset(page); |
3035 | page_end = page_start + PAGE_SIZE - 1; |
3036 | |
3037 | /* |
3038 | * Start from the cluster, as for subpage case, the cluster can start |
3039 | * inside the page. |
3040 | */ |
3041 | cur = max(page_start, cluster->boundary[*cluster_nr] - offset); |
3042 | while (cur <= page_end) { |
3043 | struct extent_state *cached_state = NULL; |
3044 | u64 extent_start = cluster->boundary[*cluster_nr] - offset; |
		u64 extent_end = get_cluster_boundary_end(cluster,
							  *cluster_nr) - offset;
3047 | u64 clamped_start = max(page_start, extent_start); |
3048 | u64 clamped_end = min(page_end, extent_end); |
3049 | u32 clamped_len = clamped_end + 1 - clamped_start; |
3050 | |
3051 | /* Reserve metadata for this range */ |
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      clamped_len, clamped_len,
						      false);
3055 | if (ret) |
3056 | goto release_page; |
3057 | |
3058 | /* Mark the range delalloc and dirty for later writeback */ |
		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
			    &cached_state);
		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
						clamped_end, 0, &cached_state);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree,
					 clamped_start, clamped_end,
					 EXTENT_LOCKED | EXTENT_BOUNDARY,
					 &cached_state);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							clamped_len, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       clamped_len);
			goto release_page;
		}
		btrfs_folio_set_dirty(fs_info, page_folio(page),
				      clamped_start, clamped_len);
3076 | |
3077 | /* |
3078 | * Set the boundary if it's inside the page. |
3079 | * Data relocation requires the destination extents to have the |
3080 | * same size as the source. |
3081 | * EXTENT_BOUNDARY bit prevents current extent from being merged |
3082 | * with previous extent. |
3083 | */ |
3084 | if (in_range(cluster->boundary[*cluster_nr] - offset, |
3085 | page_start, PAGE_SIZE)) { |
3086 | u64 boundary_start = cluster->boundary[*cluster_nr] - |
3087 | offset; |
3088 | u64 boundary_end = boundary_start + |
3089 | fs_info->sectorsize - 1; |
3090 | |
			set_extent_bit(&BTRFS_I(inode)->io_tree,
				       boundary_start, boundary_end,
				       EXTENT_BOUNDARY, NULL);
		}
		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
			      &cached_state);
		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
3098 | cur += clamped_len; |
3099 | |
3100 | /* Crossed extent end, go to next extent */ |
3101 | if (cur >= extent_end) { |
3102 | (*cluster_nr)++; |
3103 | /* Just finished the last extent of the cluster, exit. */ |
3104 | if (*cluster_nr >= cluster->nr) |
3105 | break; |
3106 | } |
3107 | } |
3108 | unlock_page(page); |
3109 | put_page(page); |
3110 | |
	balance_dirty_pages_ratelimited(inode->i_mapping);
3112 | btrfs_throttle(fs_info); |
3113 | if (btrfs_should_cancel_balance(fs_info)) |
3114 | ret = -ECANCELED; |
3115 | return ret; |
3116 | |
3117 | release_page: |
3118 | unlock_page(page); |
3119 | put_page(page); |
3120 | return ret; |
3121 | } |
3122 | |
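/*
 * Relocate one cluster of file extents: preallocate the destination
 * ranges, pin an extent map to the old location, then dirty every page in
 * the cluster so writeback copies the data into the new extents.
 */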
3123 | static int relocate_file_extent_cluster(struct inode *inode, |
3124 | const struct file_extent_cluster *cluster) |
3125 | { |
3126 | u64 offset = BTRFS_I(inode)->index_cnt; |
3127 | unsigned long index; |
3128 | unsigned long last_index; |
3129 | struct file_ra_state *ra; |
3130 | int cluster_nr = 0; |
3131 | int ret = 0; |
3132 | |
3133 | if (!cluster->nr) |
3134 | return 0; |
3135 | |
	ra = kzalloc(sizeof(*ra), GFP_NOFS);
3137 | if (!ra) |
3138 | return -ENOMEM; |
3139 | |
	ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3141 | if (ret) |
3142 | goto out; |
3143 | |
	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
					      cluster->end - offset, cluster->start);
3148 | if (ret) |
3149 | goto out; |
3150 | |
3151 | last_index = (cluster->end - offset) >> PAGE_SHIFT; |
3152 | for (index = (cluster->start - offset) >> PAGE_SHIFT; |
3153 | index <= last_index && !ret; index++) |
		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3155 | if (ret == 0) |
3156 | WARN_ON(cluster_nr != cluster->nr); |
3157 | out: |
	kfree(ra);
3159 | return ret; |
3160 | } |
3161 | |
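/*
 * Add the extent at @extent_key to @cluster, flushing the cluster first if
 * the new extent is not adjacent to it, is owned by a different root
 * (possible under simple quotas), or the cluster is already full.
 */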
3162 | static noinline_for_stack int relocate_data_extent(struct inode *inode, |
3163 | const struct btrfs_key *extent_key, |
3164 | struct file_extent_cluster *cluster) |
3165 | { |
3166 | int ret; |
3167 | struct btrfs_root *root = BTRFS_I(inode)->root; |
3168 | |
3169 | if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { |
3170 | ret = relocate_file_extent_cluster(inode, cluster); |
3171 | if (ret) |
3172 | return ret; |
3173 | cluster->nr = 0; |
3174 | } |
3175 | |
3176 | /* |
3177 | * Under simple quotas, we set root->relocation_src_root when we find |
3178 | * the extent. If adjacent extents have different owners, we can't merge |
3179 | * them while relocating. Handle this by storing the owning root that |
3180 | * started a cluster and if we see an extent from a different root break |
3181 | * cluster formation (just like the above case of non-adjacent extents). |
3182 | * |
3183 | * Without simple quotas, relocation_src_root is always 0, so we should |
3184 | * never see a mismatch, and it should have no effect on relocation |
3185 | * clusters. |
3186 | */ |
3187 | if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) { |
3188 | u64 tmp = root->relocation_src_root; |
3189 | |
3190 | /* |
3191 | * root->relocation_src_root is the state that actually affects |
3192 | * the preallocation we do here, so set it to the root owning |
3193 | * the cluster we need to relocate. |
3194 | */ |
3195 | root->relocation_src_root = cluster->owning_root; |
3196 | ret = relocate_file_extent_cluster(inode, cluster); |
3197 | if (ret) |
3198 | return ret; |
3199 | cluster->nr = 0; |
3200 | /* And reset it back for the current extent's owning root. */ |
3201 | root->relocation_src_root = tmp; |
3202 | } |
3203 | |
	if (!cluster->nr) {
		cluster->start = extent_key->objectid;
		cluster->owning_root = root->relocation_src_root;
	} else {
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	}
3210 | cluster->end = extent_key->objectid + extent_key->offset - 1; |
3211 | cluster->boundary[cluster->nr] = extent_key->objectid; |
3212 | cluster->nr++; |
3213 | |
3214 | if (cluster->nr >= MAX_EXTENTS) { |
3215 | ret = relocate_file_extent_cluster(inode, cluster); |
3216 | if (ret) |
3217 | return ret; |
3218 | cluster->nr = 0; |
3219 | } |
3220 | return 0; |
3221 | } |
3222 | |
3223 | /* |
3224 | * helper to add a tree block to the list. |
3225 | * the major work is getting the generation and level of the block |
3226 | */ |
3227 | static int add_tree_block(struct reloc_control *rc, |
3228 | const struct btrfs_key *extent_key, |
3229 | struct btrfs_path *path, |
3230 | struct rb_root *blocks) |
3231 | { |
3232 | struct extent_buffer *eb; |
3233 | struct btrfs_extent_item *ei; |
3234 | struct btrfs_tree_block_info *bi; |
3235 | struct tree_block *block; |
3236 | struct rb_node *rb_node; |
3237 | u32 item_size; |
3238 | int level = -1; |
3239 | u64 generation; |
3240 | u64 owner = 0; |
3241 | |
3242 | eb = path->nodes[0]; |
	item_size = btrfs_item_size(eb, path->slots[0]);
3244 | |
3245 | if (extent_key->type == BTRFS_METADATA_ITEM_KEY || |
3246 | item_size >= sizeof(*ei) + sizeof(*bi)) { |
3247 | unsigned long ptr = 0, end; |
3248 | |
3249 | ei = btrfs_item_ptr(eb, path->slots[0], |
3250 | struct btrfs_extent_item); |
3251 | end = (unsigned long)ei + item_size; |
3252 | if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { |
3253 | bi = (struct btrfs_tree_block_info *)(ei + 1); |
			level = btrfs_tree_block_level(eb, bi);
3255 | ptr = (unsigned long)(bi + 1); |
3256 | } else { |
3257 | level = (int)extent_key->offset; |
3258 | ptr = (unsigned long)(ei + 1); |
3259 | } |
		generation = btrfs_extent_generation(eb, ei);
3261 | |
3262 | /* |
3263 | * We're reading random blocks without knowing their owner ahead |
3264 | * of time. This is ok most of the time, as all reloc roots and |
3265 | * fs roots have the same lock type. However normal trees do |
3266 | * not, and the only way to know ahead of time is to read the |
3267 | * inline ref offset. We know it's an fs root if |
3268 | * |
3269 | * 1. There's more than one ref. |
3270 | * 2. There's a SHARED_DATA_REF_KEY set. |
3271 | * 3. FULL_BACKREF is set on the flags. |
3272 | * |
3273 | * Otherwise it's safe to assume that the ref offset == the |
3274 | * owner of this block, so we can use that when calling |
3275 | * read_tree_block. |
3276 | */ |
		if (btrfs_extent_refs(eb, ei) == 1 &&
		    !(btrfs_extent_flags(eb, ei) &
		      BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3280 | ptr < end) { |
3281 | struct btrfs_extent_inline_ref *iref; |
3282 | int type; |
3283 | |
3284 | iref = (struct btrfs_extent_inline_ref *)ptr; |
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID)
				return -EINVAL;
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				owner = btrfs_extent_inline_ref_offset(eb, iref);
3291 | } |
3292 | } else { |
3293 | btrfs_print_leaf(l: eb); |
3294 | btrfs_err(rc->block_group->fs_info, |
3295 | "unrecognized tree backref at tree block %llu slot %u" , |
3296 | eb->start, path->slots[0]); |
3297 | btrfs_release_path(p: path); |
3298 | return -EUCLEAN; |
3299 | } |
3300 | |
3301 | btrfs_release_path(p: path); |
3302 | |
3303 | BUG_ON(level == -1); |
3304 | |
	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;
3308 | |
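	/*
	 * block->key temporarily holds the nodesize and generation; once the
	 * block has been read, it is replaced by the block's first key and
	 * key_ready is set.
	 */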
3309 | block->bytenr = extent_key->objectid; |
3310 | block->key.objectid = rc->extent_root->fs_info->nodesize; |
3311 | block->key.offset = generation; |
3312 | block->level = level; |
3313 | block->key_ready = false; |
3314 | block->owner = owner; |
3315 | |
	rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
				    -EEXIST);
3320 | |
3321 | return 0; |
3322 | } |
3323 | |
3324 | /* |
3325 | * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY |
3326 | */ |
3327 | static int __add_tree_block(struct reloc_control *rc, |
3328 | u64 bytenr, u32 blocksize, |
3329 | struct rb_root *blocks) |
3330 | { |
3331 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
3332 | struct btrfs_path *path; |
3333 | struct btrfs_key key; |
3334 | int ret; |
3335 | bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); |
3336 | |
	if (tree_block_processed(bytenr, rc))
		return 0;

	if (rb_simple_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3359 | if (ret < 0) |
3360 | goto out; |
3361 | |
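	/*
	 * A skinny METADATA_ITEM lookup with offset (u64)-1 lands one slot
	 * past the wanted item, so step back and re-check the key. If the
	 * item is still missing, fall back to the old EXTENT_ITEM format.
	 */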
	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	if (ret) {
		ASSERT(ret == 1);
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
			  "tree block extent item (%llu) is not found in extent tree",
			  bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
3395 | } |
3396 | |
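/*
 * Truncate the free space cache inode of a block group so that its data
 * extents stop pinning the extents we want to relocate. The caller passes
 * either the inode itself or, when @inode is NULL, the inode number to look
 * up in the tree root.
 */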
3397 | static int delete_block_group_cache(struct btrfs_fs_info *fs_info, |
3398 | struct btrfs_block_group *block_group, |
3399 | struct inode *inode, |
3400 | u64 ino) |
3401 | { |
3402 | struct btrfs_root *root = fs_info->tree_root; |
3403 | struct btrfs_trans_handle *trans; |
3404 | int ret = 0; |
3405 | |
3406 | if (inode) |
3407 | goto truncate; |
3408 | |
	inode = btrfs_iget(fs_info->sb, ino, root);
	if (IS_ERR(inode))
		return -ENOENT;

truncate:
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
3422 | goto out; |
3423 | } |
3424 | |
3425 | ret = btrfs_truncate_free_space_cache(trans, block_group, inode); |
3426 | |
3427 | btrfs_end_transaction(trans); |
3428 | btrfs_btree_balance_dirty(fs_info); |
3429 | out: |
3430 | iput(inode); |
3431 | return ret; |
3432 | } |
3433 | |
3434 | /* |
3435 | * Locate the free space cache EXTENT_DATA in root tree leaf and delete the |
3436 | * cache inode, to avoid free space cache data extent blocking data relocation. |
3437 | */ |
3438 | static int delete_v1_space_cache(struct extent_buffer *leaf, |
3439 | struct btrfs_block_group *block_group, |
3440 | u64 data_bytenr) |
3441 | { |
3442 | u64 space_cache_ino; |
3443 | struct btrfs_file_extent_item *ei; |
3444 | struct btrfs_key key; |
3445 | bool found = false; |
3446 | int i; |
3447 | int ret; |
3448 | |
	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
		return 0;

	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
		u8 type;

		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);

		if ((type == BTRFS_FILE_EXTENT_REG ||
		     type == BTRFS_FILE_EXTENT_PREALLOC) &&
		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
			found = true;
			space_cache_ino = key.objectid;
			break;
		}
	}
	if (!found)
		return -ENOENT;
	ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
				       space_cache_ino);
	return ret;
3474 | } |
3475 | |
3476 | /* |
3477 | * helper to find all tree blocks that reference a given data extent |
3478 | */ |
3479 | static noinline_for_stack int add_data_references(struct reloc_control *rc, |
3480 | const struct btrfs_key *extent_key, |
3481 | struct btrfs_path *path, |
3482 | struct rb_root *blocks) |
3483 | { |
3484 | struct btrfs_backref_walk_ctx ctx = { 0 }; |
3485 | struct ulist_iterator leaf_uiter; |
3486 | struct ulist_node *ref_node = NULL; |
3487 | const u32 blocksize = rc->extent_root->fs_info->nodesize; |
3488 | int ret = 0; |
3489 | |
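	/*
	 * Walk every leaf referring to this data extent, drop any v1 space
	 * cache inode backed by it, and queue each referencing tree block
	 * for relocation.
	 */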
	btrfs_release_path(path);

	ctx.bytenr = extent_key->objectid;
	ctx.skip_inode_ref_list = true;
	ctx.fs_info = rc->extent_root->fs_info;

	ret = btrfs_find_all_leafs(&ctx);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&leaf_uiter);
	while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
		struct btrfs_tree_parent_check check = { 0 };
		struct extent_buffer *eb;

		eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			break;
		}
		ret = delete_v1_space_cache(eb, rc->block_group,
					    extent_key->objectid);
		free_extent_buffer(eb);
		if (ret < 0)
			break;
		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
		if (ret < 0)
			break;
	}
	if (ret < 0)
		free_block_list(blocks);
	ulist_free(ctx.refs);
	return ret;
3523 | } |
3524 | |
/*
 * Helper to find the next unprocessed extent in the target block group.
 *
 * Return 0 and fill @extent_key when an extent is found, 1 when the whole
 * block group has been processed, and a negative errno on error.
 */
3528 | static noinline_for_stack |
3529 | int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, |
3530 | struct btrfs_key *extent_key) |
3531 | { |
3532 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
3533 | struct btrfs_key key; |
3534 | struct extent_buffer *leaf; |
3535 | u64 start, end, last; |
3536 | int ret; |
3537 | |
3538 | last = rc->block_group->start + rc->block_group->length; |
3539 | while (1) { |
3540 | bool block_found; |
3541 | |
3542 | cond_resched(); |
3543 | if (rc->search_start >= last) { |
3544 | ret = 1; |
3545 | break; |
3546 | } |
3547 | |
3548 | key.objectid = rc->search_start; |
3549 | key.type = BTRFS_EXTENT_ITEM_KEY; |
3550 | key.offset = 0; |
3551 | |
		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3568 | if (key.objectid >= last) { |
3569 | ret = 1; |
3570 | break; |
3571 | } |
3572 | |
3573 | if (key.type != BTRFS_EXTENT_ITEM_KEY && |
3574 | key.type != BTRFS_METADATA_ITEM_KEY) { |
3575 | path->slots[0]++; |
3576 | goto next; |
3577 | } |
3578 | |
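		/*
		 * Skip extents that end at or before the current search
		 * position; they were already covered by a previous item.
		 */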
3579 | if (key.type == BTRFS_EXTENT_ITEM_KEY && |
3580 | key.objectid + key.offset <= rc->search_start) { |
3581 | path->slots[0]++; |
3582 | goto next; |
3583 | } |
3584 | |
3585 | if (key.type == BTRFS_METADATA_ITEM_KEY && |
3586 | key.objectid + fs_info->nodesize <= |
3587 | rc->search_start) { |
3588 | path->slots[0]++; |
3589 | goto next; |
3590 | } |
3591 | |
		block_found = find_first_extent_bit(&rc->processed_blocks,
						    key.objectid, &start, &end,
						    EXTENT_DIRTY, NULL);

		if (block_found && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					fs_info->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
3610 | return ret; |
3611 | } |
3612 | |
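/*
 * Publish the reloc control in fs_info, under the reloc mutex, so that the
 * transaction commit and COW paths can tell that relocation is running.
 */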
3613 | static void set_reloc_control(struct reloc_control *rc) |
3614 | { |
3615 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
3616 | |
3617 | mutex_lock(&fs_info->reloc_mutex); |
3618 | fs_info->reloc_ctl = rc; |
	mutex_unlock(&fs_info->reloc_mutex);
3620 | } |
3621 | |
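/* Clear the reloc control from fs_info when relocation is done or aborted. */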
3622 | static void unset_reloc_control(struct reloc_control *rc) |
3623 | { |
3624 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
3625 | |
3626 | mutex_lock(&fs_info->reloc_mutex); |
3627 | fs_info->reloc_ctl = NULL; |
	mutex_unlock(&fs_info->reloc_mutex);
3629 | } |
3630 | |
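/*
 * Prepare for relocation: allocate and fill the relocation block reserve,
 * publish the reloc control and commit the running transaction so that the
 * relocation starts from a consistent state.
 */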
3631 | static noinline_for_stack |
3632 | int prepare_to_relocate(struct reloc_control *rc) |
3633 | { |
3634 | struct btrfs_trans_handle *trans; |
3635 | int ret; |
3636 | |
	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
					      BTRFS_BLOCK_RSV_TEMP);
3639 | if (!rc->block_rsv) |
3640 | return -ENOMEM; |
3641 | |
3642 | memset(&rc->cluster, 0, sizeof(rc->cluster)); |
3643 | rc->search_start = rc->block_group->start; |
3644 | rc->extents_found = 0; |
3645 | rc->nodes_relocated = 0; |
3646 | rc->merging_rsv_size = 0; |
3647 | rc->reserved_bytes = 0; |
3648 | rc->block_rsv->size = rc->extent_root->fs_info->nodesize * |
3649 | RELOCATION_RESERVED_NODES; |
	ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
				     rc->block_rsv, rc->block_rsv->size,
				     BTRFS_RESERVE_FLUSH_ALL);
3653 | if (ret) |
3654 | return ret; |
3655 | |
3656 | rc->create_reloc_tree = true; |
3657 | set_reloc_control(rc); |
3658 | |
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * The extent tree is not a ref_cow tree and has no reloc_root
		 * to clean up, and callers are responsible for freeing the
		 * above block rsv.
		 */
		return PTR_ERR(trans);
3668 | } |
3669 | |
3670 | ret = btrfs_commit_transaction(trans); |
3671 | if (ret) |
3672 | unset_reloc_control(rc); |
3673 | |
3674 | return ret; |
3675 | } |
3676 | |
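/*
 * The main per-block-group relocation loop: find each unprocessed extent in
 * the target block group, collect the tree blocks (or, for data extents in
 * the UPDATE_DATA_PTRS stage, the referencing tree blocks) that need to be
 * relocated, relocate them, and finally merge the reloc trees back into the
 * fs trees.
 */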
3677 | static noinline_for_stack int relocate_block_group(struct reloc_control *rc) |
3678 | { |
3679 | struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; |
3680 | struct rb_root blocks = RB_ROOT; |
3681 | struct btrfs_key key; |
3682 | struct btrfs_trans_handle *trans = NULL; |
3683 | struct btrfs_path *path; |
3684 | struct btrfs_extent_item *ei; |
3685 | u64 flags; |
3686 | int ret; |
3687 | int err = 0; |
3688 | int progress = 0; |
3689 | |
3690 | path = btrfs_alloc_path(); |
3691 | if (!path) |
3692 | return -ENOMEM; |
3693 | path->reada = READA_FORWARD; |
3694 | |
3695 | ret = prepare_to_relocate(rc); |
3696 | if (ret) { |
3697 | err = ret; |
3698 | goto out_free; |
3699 | } |
3700 | |
3701 | while (1) { |
3702 | rc->reserved_bytes = 0; |
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     rc->block_rsv->size,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans);
			trans = NULL;
			continue;
		}

		ret = find_next_extent(rc, path, &key);
3725 | if (ret < 0) |
3726 | err = ret; |
3727 | if (ret != 0) |
3728 | break; |
3729 | |
3730 | rc->extents_found++; |
3731 | |
		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		flags = btrfs_extent_flags(path->nodes[0], ei);
3735 | |
3736 | /* |
3737 | * If we are relocating a simple quota owned extent item, we |
3738 | * need to note the owner on the reloc data root so that when |
3739 | * we allocate the replacement item, we can attribute it to the |
3740 | * correct eventual owner (rather than the reloc data root). |
3741 | */ |
		if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
			struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
			u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
								path->nodes[0],
								path->slots[0]);
3747 | |
3748 | root->relocation_src_root = owning_root_id; |
3749 | } |
3750 | |
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
3758 | ret = 0; |
3759 | } |
3760 | if (ret < 0) { |
3761 | err = ret; |
3762 | break; |
3763 | } |
3764 | |
3765 | if (!RB_EMPTY_ROOT(&blocks)) { |
			ret = relocate_tree_blocks(trans, rc, &blocks);
3767 | if (ret < 0) { |
3768 | if (ret != -EAGAIN) { |
3769 | err = ret; |
3770 | break; |
3771 | } |
3772 | rc->extents_found--; |
3773 | rc->search_start = key.objectid; |
3774 | } |
3775 | } |
3776 | |
3777 | btrfs_end_transaction_throttle(trans); |
3778 | btrfs_btree_balance_dirty(fs_info); |
3779 | trans = NULL; |
3780 | |
3781 | if (rc->stage == MOVE_DATA_EXTENTS && |
3782 | (flags & BTRFS_EXTENT_FLAG_DATA)) { |
3783 | rc->found_file_extent = true; |
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
3786 | if (ret < 0) { |
3787 | err = ret; |
3788 | break; |
3789 | } |
3790 | } |
3791 | if (btrfs_should_cancel_balance(fs_info)) { |
3792 | err = -ECANCELED; |
3793 | break; |
3794 | } |
3795 | } |
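	/*
	 * If we ran out of space but made progress in this pass, try to force
	 * a chunk allocation and restart instead of failing outright.
	 */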
3796 | if (trans && progress && err == -ENOSPC) { |
		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3798 | if (ret == 1) { |
3799 | err = 0; |
3800 | progress = 0; |
3801 | goto restart; |
3802 | } |
3803 | } |
3804 | |
	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3807 | |
3808 | if (trans) { |
3809 | btrfs_end_transaction_throttle(trans); |
3810 | btrfs_btree_balance_dirty(fs_info); |
3811 | } |
3812 | |
	if (!err) {
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = false;
	set_reloc_control(rc);

	btrfs_backref_release_cache(&rc->backref_cache);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3825 | |
	/*
	 * Even when the relocation is cancelled, we must still go through
	 * prepare_to_merge() and merge_reloc_roots().
	 *
	 * On error (including a cancelled balance), prepare_to_merge() will
	 * mark all reloc trees orphan, then queue them for cleanup in
	 * merge_reloc_roots().
	 */
3834 | err = prepare_to_merge(rc, err); |
3835 | |
3836 | merge_reloc_roots(rc); |
3837 | |
3838 | rc->merge_reloc_tree = false; |
3839 | unset_reloc_control(rc); |
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3841 | |
3842 | /* get rid of pinned extents */ |
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
3846 | goto out_free; |
3847 | } |
3848 | ret = btrfs_commit_transaction(trans); |
3849 | if (ret && !err) |
3850 | err = ret; |
3851 | out_free: |
3852 | ret = clean_dirty_subvols(rc); |
3853 | if (ret < 0 && !err) |
3854 | err = ret; |
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
	btrfs_free_path(path);
3857 | return err; |
3858 | } |
3859 | |
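/*
 * Insert a bare inode item for the data relocation inode: a regular file
 * with zero link count, flagged NOCOMPRESS and PREALLOC.
 */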
3860 | static int __insert_orphan_inode(struct btrfs_trans_handle *trans, |
3861 | struct btrfs_root *root, u64 objectid) |
3862 | { |
3863 | struct btrfs_path *path; |
3864 | struct btrfs_inode_item *item; |
3865 | struct extent_buffer *leaf; |
3866 | int ret; |
3867 | |
3868 | path = btrfs_alloc_path(); |
3869 | if (!path) |
3870 | return -ENOMEM; |
3871 | |
3872 | ret = btrfs_insert_empty_inode(trans, root, path, objectid); |
3873 | if (ret) |
3874 | goto out; |
3875 | |
3876 | leaf = path->nodes[0]; |
3877 | item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); |
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(trans, leaf);
out:
	btrfs_free_path(path);
3887 | return ret; |
3888 | } |
3889 | |
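/*
 * Delete the inode item created by __insert_orphan_inode(), used to undo the
 * insertion when looking up the new inode fails.
 */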
3890 | static void delete_orphan_inode(struct btrfs_trans_handle *trans, |
3891 | struct btrfs_root *root, u64 objectid) |
3892 | { |
3893 | struct btrfs_path *path; |
3894 | struct btrfs_key key; |
3895 | int ret = 0; |
3896 | |
3897 | path = btrfs_alloc_path(); |
3898 | if (!path) { |
3899 | ret = -ENOMEM; |
3900 | goto out; |
3901 | } |
3902 | |
3903 | key.objectid = objectid; |
3904 | key.type = BTRFS_INODE_ITEM_KEY; |
3905 | key.offset = 0; |
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3907 | if (ret) { |
3908 | if (ret > 0) |
3909 | ret = -ENOENT; |
3910 | goto out; |
3911 | } |
3912 | ret = btrfs_del_item(trans, root, path); |
3913 | out: |
3914 | if (ret) |
3915 | btrfs_abort_transaction(trans, ret); |
	btrfs_free_path(path);
3917 | } |
3918 | |
3919 | /* |
3920 | * helper to create inode for data relocation. |
3921 | * the inode is in data relocation tree and its link count is 0 |
3922 | */ |
3923 | static noinline_for_stack struct inode *create_reloc_inode( |
3924 | struct btrfs_fs_info *fs_info, |
3925 | const struct btrfs_block_group *group) |
3926 | { |
3927 | struct inode *inode = NULL; |
3928 | struct btrfs_trans_handle *trans; |
3929 | struct btrfs_root *root; |
3930 | u64 objectid; |
3931 | int err = 0; |
3932 | |
	root = btrfs_grab_root(fs_info->data_reloc_root);
	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans)) {
		btrfs_put_root(root);
		return ERR_CAST(trans);
	}

	err = btrfs_get_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	if (err)
		goto out;

	inode = btrfs_iget(fs_info->sb, objectid, root);
	if (IS_ERR(inode)) {
		delete_orphan_inode(trans, root, objectid);
		err = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}
	BTRFS_I(inode)->index_cnt = group->start;

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
3958 | out: |
	btrfs_put_root(root);
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err) {
		iput(inode);
		inode = ERR_PTR(err);
3965 | } |
3966 | return inode; |
3967 | } |
3968 | |
3969 | /* |
3970 | * Mark start of chunk relocation that is cancellable. Check if the cancellation |
3971 | * has been requested meanwhile and don't start in that case. |
3972 | * |
3973 | * Return: |
3974 | * 0 success |
3975 | * -EINPROGRESS operation is already in progress, that's probably a bug |
3976 | * -ECANCELED cancellation request was set before the operation started |
3977 | */ |
3978 | static int reloc_chunk_start(struct btrfs_fs_info *fs_info) |
3979 | { |
	if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
		/* This should not happen */
		btrfs_err(fs_info, "reloc already running, cannot start");
		return -EINPROGRESS;
	}

	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
		btrfs_info(fs_info, "chunk relocation canceled on start");
		/*
		 * On cancel, clear all requests but let the caller mark
		 * the end after cleanup operations.
		 */
		atomic_set(&fs_info->reloc_cancel_req, 0);
		return -ECANCELED;
	}
3995 | return 0; |
3996 | } |
3997 | |
3998 | /* |
3999 | * Mark end of chunk relocation that is cancellable and wake any waiters. |
4000 | */ |
4001 | static void reloc_chunk_end(struct btrfs_fs_info *fs_info) |
4002 | { |
	/* Requested after start, clear bit first so any waiters can continue */
	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
		btrfs_info(fs_info, "chunk relocation canceled during operation");
	clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
	atomic_set(&fs_info->reloc_cancel_req, 0);
4008 | } |
4009 | |
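/* Allocate and initialize the reloc control for a single relocation run. */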
4010 | static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) |
4011 | { |
4012 | struct reloc_control *rc; |
4013 | |
	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
	btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
	rc->reloc_root_tree.rb_root = RB_ROOT;
	spin_lock_init(&rc->reloc_root_tree.lock);
	extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
	return rc;
4025 | } |
4026 | |
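/* Free the reloc control together with any leftover reloc root mappings. */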
4027 | static void free_reloc_control(struct reloc_control *rc) |
4028 | { |
4029 | struct mapping_node *node, *tmp; |
4030 | |
	free_reloc_roots(&rc->reloc_roots);
	rbtree_postorder_for_each_entry_safe(node, tmp,
			&rc->reloc_root_tree.rb_root, rb_node)
		kfree(node);

	kfree(rc);
4037 | } |
4038 | |
4039 | /* |
4040 | * Print the block group being relocated |
4041 | */ |
4042 | static void describe_relocation(struct btrfs_fs_info *fs_info, |
4043 | struct btrfs_block_group *block_group) |
4044 | { |
4045 | char buf[128] = {'\0'}; |
4046 | |
	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));

	btrfs_info(fs_info,
		   "relocating block group %llu flags %s",
		   block_group->start, buf);
4052 | } |
4053 | |
4054 | static const char *stage_to_string(enum reloc_stage stage) |
4055 | { |
	if (stage == MOVE_DATA_EXTENTS)
		return "move data extents";
	if (stage == UPDATE_DATA_PTRS)
		return "update data pointers";
	return "unknown";
4061 | } |
4062 | |
/*
 * Relocate all extents of a block group, looping over the MOVE_DATA_EXTENTS
 * and UPDATE_DATA_PTRS stages until no unprocessed extent remains.
 */
4066 | int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) |
4067 | { |
4068 | struct btrfs_block_group *bg; |
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
4070 | struct reloc_control *rc; |
4071 | struct inode *inode; |
4072 | struct btrfs_path *path; |
4073 | int ret; |
4074 | int rw = 0; |
4075 | int err = 0; |
4076 | |
4077 | /* |
4078 | * This only gets set if we had a half-deleted snapshot on mount. We |
4079 | * cannot allow relocation to start while we're still trying to clean up |
4080 | * these pending deletions. |
4081 | */ |
	ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
4083 | if (ret) |
4084 | return ret; |
4085 | |
4086 | /* We may have been woken up by close_ctree, so bail if we're closing. */ |
4087 | if (btrfs_fs_closing(fs_info)) |
4088 | return -EINTR; |
4089 | |
	bg = btrfs_lookup_block_group(fs_info, group_start);
4091 | if (!bg) |
4092 | return -ENOENT; |
4093 | |
4094 | /* |
4095 | * Relocation of a data block group creates ordered extents. Without |
4096 | * sb_start_write(), we can freeze the filesystem while unfinished |
4097 | * ordered extents are left. Such ordered extents can cause a deadlock |
4098 | * e.g. when syncfs() is waiting for their completion but they can't |
4099 | * finish because they block when joining a transaction, due to the |
4100 | * fact that the freeze locks are being held in write mode. |
4101 | */ |
4102 | if (bg->flags & BTRFS_BLOCK_GROUP_DATA) |
4103 | ASSERT(sb_write_started(fs_info->sb)); |
4104 | |
	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
		btrfs_put_block_group(bg);
		return -ETXTBSY;
	}

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		btrfs_put_block_group(bg);
		return -ENOMEM;
	}
4115 | |
4116 | ret = reloc_chunk_start(fs_info); |
4117 | if (ret < 0) { |
4118 | err = ret; |
4119 | goto out_put_bg; |
4120 | } |
4121 | |
4122 | rc->extent_root = extent_root; |
4123 | rc->block_group = bg; |
4124 | |
	ret = btrfs_inc_block_group_ro(rc->block_group, true);
4126 | if (ret) { |
4127 | err = ret; |
4128 | goto out; |
4129 | } |
4130 | rw = 1; |
4131 | |
4132 | path = btrfs_alloc_path(); |
4133 | if (!path) { |
4134 | err = -ENOMEM; |
4135 | goto out; |
4136 | } |
4137 | |
	inode = lookup_free_space_inode(rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);
4145 | |
4146 | if (ret && ret != -ENOENT) { |
4147 | err = ret; |
4148 | goto out; |
4149 | } |
4150 | |
	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
4154 | rc->data_inode = NULL; |
4155 | goto out; |
4156 | } |
4157 | |
	describe_relocation(fs_info, rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->start,
				 rc->block_group->length);

	ret = btrfs_zone_finish(rc->block_group);
4167 | WARN_ON(ret && ret != -EAGAIN); |
4168 | |
4169 | while (1) { |
4170 | enum reloc_stage finishes_stage; |
4171 | |
		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
4175 | if (ret < 0) |
4176 | err = ret; |
4177 | |
4178 | finishes_stage = rc->stage; |
4179 | /* |
4180 | * We may have gotten ENOSPC after we already dirtied some |
4181 | * extents. If writeout happens while we're relocating a |
4182 | * different block group we could end up hitting the |
4183 | * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in |
4184 | * btrfs_reloc_cow_block. Make sure we write everything out |
4185 | * properly so we don't trip over this problem, and then break |
4186 | * out of the loop if we hit an error. |
4187 | */ |
		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret)
				err = ret;
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}
4197 | |
4198 | if (err < 0) |
4199 | goto out; |
4200 | |
4201 | if (rc->extents_found == 0) |
4202 | break; |
4203 | |
		btrfs_info(fs_info, "found %llu extents, stage: %s",
			   rc->extents_found, stage_to_string(finishes_stage));
4206 | } |
4207 | |
4208 | WARN_ON(rc->block_group->pinned > 0); |
4209 | WARN_ON(rc->block_group->reserved > 0); |
4210 | WARN_ON(rc->block_group->used > 0); |
4211 | out: |
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
out_put_bg:
	btrfs_put_block_group(bg);
4217 | reloc_chunk_end(fs_info); |
4218 | free_reloc_control(rc); |
4219 | return err; |
4220 | } |
4221 | |
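/*
 * A reloc root whose fs root no longer exists cannot be merged. Reset its
 * drop progress and zero its refs so it is cleaned up as a garbage root
 * instead.
 */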
4222 | static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) |
4223 | { |
4224 | struct btrfs_fs_info *fs_info = root->fs_info; |
4225 | struct btrfs_trans_handle *trans; |
4226 | int ret, err; |
4227 | |
	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	btrfs_set_root_drop_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
4238 | |
4239 | err = btrfs_end_transaction(trans); |
4240 | if (err) |
4241 | return err; |
4242 | return ret; |
4243 | } |
4244 | |
4245 | /* |
4246 | * recover relocation interrupted by system crash. |
4247 | * |
4248 | * this function resumes merging reloc trees with corresponding fs trees. |
4249 | * this is important for keeping the sharing of tree blocks |
4250 | */ |
4251 | int btrfs_recover_relocation(struct btrfs_fs_info *fs_info) |
4252 | { |
4253 | LIST_HEAD(reloc_roots); |
4254 | struct btrfs_key key; |
4255 | struct btrfs_root *fs_root; |
4256 | struct btrfs_root *reloc_root; |
4257 | struct btrfs_path *path; |
4258 | struct extent_buffer *leaf; |
4259 | struct reloc_control *rc = NULL; |
4260 | struct btrfs_trans_handle *trans; |
4261 | int ret; |
4262 | int err = 0; |
4263 | |
4264 | path = btrfs_alloc_path(); |
4265 | if (!path) |
4266 | return -ENOMEM; |
4267 | path->reada = READA_BACK; |
4268 | |
4269 | key.objectid = BTRFS_TREE_RELOC_OBJECTID; |
4270 | key.type = BTRFS_ROOT_ITEM_KEY; |
4271 | key.offset = (u64)-1; |
4272 | |
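	/*
	 * Walk all reloc root items backwards from the highest key offset;
	 * the offset of a reloc root's key is the objectid of the fs root
	 * it belongs to.
	 */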
4273 | while (1) { |
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
4276 | if (ret < 0) { |
4277 | err = ret; |
4278 | goto out; |
4279 | } |
4280 | if (ret > 0) { |
4281 | if (path->slots[0] == 0) |
4282 | break; |
4283 | path->slots[0]--; |
4284 | } |
4285 | leaf = path->nodes[0]; |
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
4288 | |
4289 | if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || |
4290 | key.type != BTRFS_ROOT_ITEM_KEY) |
4291 | break; |
4292 | |
		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = btrfs_get_fs_root(fs_info,
					reloc_root->root_key.offset, false);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			} else {
				btrfs_put_root(fs_root);
			}
		}
4320 | |
4321 | if (key.offset == 0) |
4322 | break; |
4323 | |
4324 | key.offset--; |
4325 | } |
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
4329 | goto out; |
4330 | |
4331 | rc = alloc_reloc_control(fs_info); |
4332 | if (!rc) { |
4333 | err = -ENOMEM; |
4334 | goto out; |
4335 | } |
4336 | |
4337 | ret = reloc_chunk_start(fs_info); |
4338 | if (ret < 0) { |
4339 | err = ret; |
4340 | goto out_end; |
4341 | } |
4342 | |
	rc->extent_root = btrfs_extent_root(fs_info, 0);
4344 | |
4345 | set_reloc_control(rc); |
4346 | |
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
4350 | goto out_unset; |
4351 | } |
4352 | |
4353 | rc->merge_reloc_tree = true; |
4354 | |
	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					    false);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_end_transaction(trans);
			goto out_unset;
		}

		err = __add_reloc_root(reloc_root);
		ASSERT(err != -EEXIST);
		if (err) {
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(fs_root);
			btrfs_end_transaction(trans);
			goto out_unset;
		}
		fs_root->reloc_root = btrfs_grab_root(reloc_root);
		btrfs_put_root(fs_root);
	}
4386 | |
4387 | err = btrfs_commit_transaction(trans); |
4388 | if (err) |
4389 | goto out_unset; |
4390 | |
4391 | merge_reloc_roots(rc); |
4392 | |
4393 | unset_reloc_control(rc); |
4394 | |
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
4398 | goto out_clean; |
4399 | } |
4400 | err = btrfs_commit_transaction(trans); |
4401 | out_clean: |
4402 | ret = clean_dirty_subvols(rc); |
4403 | if (ret < 0 && !err) |
4404 | err = ret; |
4405 | out_unset: |
4406 | unset_reloc_control(rc); |
4407 | out_end: |
4408 | reloc_chunk_end(fs_info); |
4409 | free_reloc_control(rc); |
4410 | out: |
	free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);
4414 | |
	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
		ASSERT(fs_root);
		err = btrfs_orphan_cleanup(fs_root);
		btrfs_put_root(fs_root);
	}
4422 | return err; |
4423 | } |
4424 | |
/*
 * Helper to clone checksums into an ordered extent for data relocation.
 *
 * Cloning the existing checksums correctly handles nodatasum extents and
 * saves the CPU time of recalculating them.
 */
4431 | int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered) |
4432 | { |
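	/*
	 * The data reloc inode maps file offsets 1:1 to logical addresses in
	 * the block group being relocated (index_cnt holds the block group
	 * start), so the old disk bytenr is derived from the file offset.
	 */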
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 disk_bytenr = ordered->file_offset + inode->index_cnt;
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	LIST_HEAD(list);
	int ret;

	ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
				      disk_bytenr + ordered->num_bytes - 1,
				      &list, 0, false);
4443 | if (ret) |
4444 | return ret; |
4445 | |
	while (!list_empty(&list)) {
		struct btrfs_ordered_sum *sums =
			list_entry(list.next, struct btrfs_ordered_sum, list);

		list_del_init(&sums->list);
4451 | |
4452 | /* |
4453 | * We need to offset the new_bytenr based on where the csum is. |
4454 | * We need to do this because we will read in entire prealloc |
4455 | * extents but we may have written to say the middle of the |
4456 | * prealloc extent, so we need to make sure the csum goes with |
4457 | * the right disk offset. |
4458 | * |
4459 | * We can do this because the data reloc inode refers strictly |
4460 | * to the on disk bytes, so we don't have to worry about |
4461 | * disk_len vs real len like with real inodes since it's all |
4462 | * disk length. |
4463 | */ |
4464 | sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr; |
		btrfs_add_ordered_sum(ordered, sums);
4466 | } |
4467 | |
4468 | return 0; |
4469 | } |
4470 | |
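/*
 * COW hook for relocation: keep the backref cache pointing at the new copy
 * when a reloc tree block is COWed, and rewrite the file extent pointers of
 * an fs tree leaf COWed for the first time in the UPDATE_DATA_PTRS stage.
 */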
4471 | int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, |
4472 | struct btrfs_root *root, |
4473 | const struct extent_buffer *buf, |
4474 | struct extent_buffer *cow) |
4475 | { |
4476 | struct btrfs_fs_info *fs_info = root->fs_info; |
4477 | struct reloc_control *rc; |
4478 | struct btrfs_backref_node *node; |
4479 | int first_cow = 0; |
4480 | int level; |
4481 | int ret = 0; |
4482 | |
4483 | rc = fs_info->reloc_ctl; |
4484 | if (!rc) |
4485 | return 0; |
4486 | |
4487 | BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root)); |
4488 | |
	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
4492 | first_cow = 1; |
4493 | |
4494 | if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && |
4495 | rc->create_reloc_tree) { |
4496 | WARN_ON(!first_cow && level == 0); |
4497 | |
4498 | node = rc->backref_cache.path[level]; |
4499 | BUG_ON(node->bytenr != buf->start && |
4500 | node->new_bytenr != buf->start); |
4501 | |
4502 | btrfs_backref_drop_node_buffer(node); |
		atomic_inc(&cow->refs);
4504 | node->eb = cow; |
4505 | node->new_bytenr = cow->start; |
4506 | |
4507 | if (!node->pending) { |
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
4510 | node->pending = 1; |
4511 | } |
4512 | |
4513 | if (first_cow) |
4514 | mark_block_processed(rc, node); |
4515 | |
4516 | if (first_cow && level > 0) |
4517 | rc->nodes_relocated += buf->len; |
4518 | } |
4519 | |
4520 | if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) |
		ret = replace_file_extents(trans, rc, root, cow);
4522 | return ret; |
4523 | } |
4524 | |
4525 | /* |
4526 | * called before creating snapshot. it calculates metadata reservation |
4527 | * required for relocating tree blocks in the snapshot |
4528 | */ |
4529 | void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, |
4530 | u64 *bytes_to_reserve) |
4531 | { |
4532 | struct btrfs_root *root = pending->root; |
4533 | struct reloc_control *rc = root->fs_info->reloc_ctl; |
4534 | |
4535 | if (!rc || !have_reloc_root(root)) |
4536 | return; |
4537 | |
4538 | if (!rc->merge_reloc_tree) |
4539 | return; |
4540 | |
4541 | root = root->reloc_root; |
4542 | BUG_ON(btrfs_root_refs(&root->root_item) == 0); |
	/*
	 * Relocation is in the stage of merging trees. The space used by
	 * merging a reloc tree is twice the size of the relocated tree nodes
	 * in the worst case: half for COWing the reloc tree, half for COWing
	 * the fs tree. The space used by COWing the reloc tree will be freed
	 * after the tree is dropped. If we create a snapshot, COWing the fs
	 * tree may use more space than it frees, so we need to reserve extra
	 * space.
	 */
4553 | *bytes_to_reserve += rc->nodes_relocated; |
4554 | } |
4555 | |
4556 | /* |
4557 | * called after snapshot is created. migrate block reservation |
4558 | * and create reloc root for the newly created snapshot |
4559 | * |
4560 | * This is similar to btrfs_init_reloc_root(), we come out of here with two |
4561 | * references held on the reloc_root, one for root->reloc_root and one for |
4562 | * rc->reloc_roots. |
4563 | */ |
4564 | int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, |
4565 | struct btrfs_pending_snapshot *pending) |
4566 | { |
4567 | struct btrfs_root *root = pending->root; |
4568 | struct btrfs_root *reloc_root; |
4569 | struct btrfs_root *new_root; |
4570 | struct reloc_control *rc = root->fs_info->reloc_ctl; |
4571 | int ret; |
4572 | |
4573 | if (!rc || !have_reloc_root(root)) |
4574 | return 0; |
4575 | |
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, true);
4583 | if (ret) |
4584 | return ret; |
4585 | } |
4586 | |
4587 | new_root = pending->snap; |
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	new_root->reloc_root = btrfs_grab_root(reloc_root);
4601 | |
	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
4604 | return ret; |
4605 | } |
4606 | |
4607 | /* |
4608 | * Get the current bytenr for the block group which is being relocated. |
4609 | * |
4610 | * Return U64_MAX if no running relocation. |
4611 | */ |
4612 | u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info) |
4613 | { |
4614 | u64 logical = U64_MAX; |
4615 | |
4616 | lockdep_assert_held(&fs_info->reloc_mutex); |
4617 | |
4618 | if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group) |
4619 | logical = fs_info->reloc_ctl->block_group->start; |
4620 | return logical; |
4621 | } |
4622 | |