1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2007 Oracle. All rights reserved. |
4 | */ |
5 | |
6 | #include <linux/sched.h> |
7 | #include <linux/sched/signal.h> |
8 | #include <linux/pagemap.h> |
9 | #include <linux/writeback.h> |
10 | #include <linux/blkdev.h> |
11 | #include <linux/sort.h> |
12 | #include <linux/rcupdate.h> |
13 | #include <linux/kthread.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/ratelimit.h> |
16 | #include <linux/percpu_counter.h> |
17 | #include <linux/lockdep.h> |
18 | #include <linux/crc32c.h> |
19 | #include "ctree.h" |
20 | #include "extent-tree.h" |
21 | #include "transaction.h" |
22 | #include "disk-io.h" |
23 | #include "print-tree.h" |
24 | #include "volumes.h" |
25 | #include "raid56.h" |
26 | #include "locking.h" |
27 | #include "free-space-cache.h" |
28 | #include "free-space-tree.h" |
29 | #include "qgroup.h" |
30 | #include "ref-verify.h" |
31 | #include "space-info.h" |
32 | #include "block-rsv.h" |
33 | #include "discard.h" |
34 | #include "zoned.h" |
35 | #include "dev-replace.h" |
36 | #include "fs.h" |
37 | #include "accessors.h" |
38 | #include "root-tree.h" |
39 | #include "file-item.h" |
40 | #include "orphan.h" |
41 | #include "tree-checker.h" |
42 | #include "raid-stripe-tree.h" |
43 | |
44 | #undef SCRAMBLE_DELAYED_REFS |
45 | |
46 | |
47 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, |
48 | struct btrfs_delayed_ref_head *href, |
49 | struct btrfs_delayed_ref_node *node, u64 parent, |
50 | u64 root_objectid, u64 owner_objectid, |
51 | u64 owner_offset, |
52 | struct btrfs_delayed_extent_op *); |
53 | static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, |
54 | struct extent_buffer *leaf, |
55 | struct btrfs_extent_item *ei); |
56 | static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, |
57 | u64 parent, u64 root_objectid, |
58 | u64 flags, u64 owner, u64 offset, |
59 | struct btrfs_key *ins, int ref_mod, u64 oref_root); |
60 | static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, |
61 | struct btrfs_delayed_ref_node *node, |
62 | struct btrfs_delayed_extent_op *extent_op); |
63 | static int find_next_key(struct btrfs_path *path, int level, |
64 | struct btrfs_key *key); |
65 | |
66 | static int block_group_bits(struct btrfs_block_group *cache, u64 bits) |
67 | { |
68 | return (cache->flags & bits) == bits; |
69 | } |
70 | |
71 | /* simple helper to search for an existing data extent at a given offset */ |
72 | int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len) |
73 | { |
	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
75 | int ret; |
76 | struct btrfs_key key; |
77 | struct btrfs_path *path; |
78 | |
79 | path = btrfs_alloc_path(); |
80 | if (!path) |
81 | return -ENOMEM; |
82 | |
83 | key.objectid = start; |
84 | key.offset = len; |
85 | key.type = BTRFS_EXTENT_ITEM_KEY; |
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	btrfs_free_path(path);
88 | return ret; |
89 | } |
90 | |
91 | /* |
92 | * helper function to lookup reference count and flags of a tree block. |
93 | * |
94 | * the head node for delayed ref is used to store the sum of all the |
95 | * reference count modifications queued up in the rbtree. the head |
96 | * node may also store the extent flags to set. This way you can check |
97 | * to see what the reference count and extent flags would be if all of |
98 | * the delayed refs are not processed. |
99 | */ |
100 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, |
101 | struct btrfs_fs_info *fs_info, u64 bytenr, |
102 | u64 offset, int metadata, u64 *refs, u64 *flags, |
103 | u64 *owning_root) |
104 | { |
105 | struct btrfs_root *extent_root; |
106 | struct btrfs_delayed_ref_head *head; |
107 | struct btrfs_delayed_ref_root *delayed_refs; |
108 | struct btrfs_path *path; |
109 | struct btrfs_extent_item *ei; |
110 | struct extent_buffer *leaf; |
111 | struct btrfs_key key; |
112 | u32 item_size; |
113 | u64 num_refs; |
114 | u64 extent_flags; |
115 | u64 owner = 0; |
116 | int ret; |
117 | |
118 | /* |
119 | * If we don't have skinny metadata, don't bother doing anything |
120 | * different |
121 | */ |
122 | if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) { |
123 | offset = fs_info->nodesize; |
124 | metadata = 0; |
125 | } |
126 | |
127 | path = btrfs_alloc_path(); |
128 | if (!path) |
129 | return -ENOMEM; |
130 | |
131 | if (!trans) { |
132 | path->skip_locking = 1; |
133 | path->search_commit_root = 1; |
134 | } |
135 | |
136 | search_again: |
137 | key.objectid = bytenr; |
138 | key.offset = offset; |
139 | if (metadata) |
140 | key.type = BTRFS_METADATA_ITEM_KEY; |
141 | else |
142 | key.type = BTRFS_EXTENT_ITEM_KEY; |
143 | |
144 | extent_root = btrfs_extent_root(fs_info, bytenr); |
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
146 | if (ret < 0) |
147 | goto out_free; |
148 | |
149 | if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { |
150 | if (path->slots[0]) { |
151 | path->slots[0]--; |
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
154 | if (key.objectid == bytenr && |
155 | key.type == BTRFS_EXTENT_ITEM_KEY && |
156 | key.offset == fs_info->nodesize) |
157 | ret = 0; |
158 | } |
159 | } |
160 | |
161 | if (ret == 0) { |
162 | leaf = path->nodes[0]; |
		item_size = btrfs_item_size(leaf, path->slots[0]);
164 | if (item_size >= sizeof(*ei)) { |
165 | ei = btrfs_item_ptr(leaf, path->slots[0], |
166 | struct btrfs_extent_item); |
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
			owner = btrfs_get_extent_owner_root(fs_info, leaf,
							    path->slots[0]);
171 | } else { |
172 | ret = -EUCLEAN; |
			btrfs_err(fs_info,
		"unexpected extent item size, has %u expect >= %zu",
				  item_size, sizeof(*ei));
176 | if (trans) |
177 | btrfs_abort_transaction(trans, ret); |
178 | else |
179 | btrfs_handle_fs_error(fs_info, ret, NULL); |
180 | |
181 | goto out_free; |
182 | } |
183 | |
184 | BUG_ON(num_refs == 0); |
185 | } else { |
186 | num_refs = 0; |
187 | extent_flags = 0; |
188 | ret = 0; |
189 | } |
190 | |
191 | if (!trans) |
192 | goto out; |
193 | |
194 | delayed_refs = &trans->transaction->delayed_refs; |
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);
203 | |
204 | /* |
205 | * Mutex was contended, block until it's released and try |
206 | * again |
207 | */ |
208 | mutex_lock(&head->mutex); |
			mutex_unlock(&head->mutex);
210 | btrfs_put_delayed_ref_head(head); |
211 | goto search_again; |
212 | } |
		spin_lock(&head->lock);
214 | if (head->extent_op && head->extent_op->update_flags) |
215 | extent_flags |= head->extent_op->flags_to_set; |
216 | else |
217 | BUG_ON(num_refs == 0); |
218 | |
219 | num_refs += head->ref_mod; |
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
222 | } |
	spin_unlock(&delayed_refs->lock);
224 | out: |
225 | WARN_ON(num_refs == 0); |
226 | if (refs) |
227 | *refs = num_refs; |
228 | if (flags) |
229 | *flags = extent_flags; |
230 | if (owning_root) |
231 | *owning_root = owner; |
232 | out_free: |
	btrfs_free_path(path);
234 | return ret; |
235 | } |
236 | |
237 | /* |
238 | * Back reference rules. Back refs have three main goals: |
239 | * |
240 | * 1) differentiate between all holders of references to an extent so that |
241 | * when a reference is dropped we can make sure it was a valid reference |
242 | * before freeing the extent. |
243 | * |
244 | * 2) Provide enough information to quickly find the holders of an extent |
245 | * if we notice a given block is corrupted or bad. |
246 | * |
247 | * 3) Make it easy to migrate blocks for FS shrinking or storage pool |
248 | * maintenance. This is actually the same as #2, but with a slightly |
249 | * different use case. |
250 | * |
251 | * There are two kinds of back refs. The implicit back refs is optimized |
252 | * for pointers in non-shared tree blocks. For a given pointer in a block, |
253 | * back refs of this kind provide information about the block's owner tree |
 * and the pointer's key. This information allows us to find the block by
255 | * b-tree searching. The full back refs is for pointers in tree blocks not |
256 | * referenced by their owner trees. The location of tree block is recorded |
257 | * in the back refs. Actually the full back refs is generic, and can be |
258 | * used in all cases the implicit back refs is used. The major shortcoming |
259 | * of the full back refs is its overhead. Every time a tree block gets |
260 | * COWed, we have to update back refs entry for all pointers in it. |
261 | * |
262 | * For a newly allocated tree block, we use implicit back refs for |
263 | * pointers in it. This means most tree related operations only involve |
264 | * implicit back refs. For a tree block created in old transaction, the |
265 | * only way to drop a reference to it is COW it. So we can detect the |
266 | * event that tree block loses its owner tree's reference and do the |
267 | * back refs conversion. |
268 | * |
269 | * When a tree block is COWed through a tree, there are four cases: |
270 | * |
271 | * The reference count of the block is one and the tree is the block's |
272 | * owner tree. Nothing to do in this case. |
273 | * |
274 | * The reference count of the block is one and the tree is not the |
275 | * block's owner tree. In this case, full back refs is used for pointers |
276 | * in the block. Remove these full back refs, add implicit back refs for |
 * every pointer in the new block.
278 | * |
279 | * The reference count of the block is greater than one and the tree is |
280 | * the block's owner tree. In this case, implicit back refs is used for |
 * pointers in the block. Add full back refs for every pointer in the
282 | * block, increase lower level extents' reference counts. The original |
283 | * implicit back refs are entailed to the new block. |
284 | * |
285 | * The reference count of the block is greater than one and the tree is |
286 | * not the block's owner tree. Add implicit back refs for every pointer in |
287 | * the new block, increase lower level extents' reference count. |
288 | * |
289 | * Back Reference Key composing: |
290 | * |
291 | * The key objectid corresponds to the first byte in the extent, |
292 | * The key type is used to differentiate between types of back refs. |
293 | * There are different meanings of the key offset for different types |
294 | * of back refs. |
295 | * |
296 | * File extents can be referenced by: |
297 | * |
298 | * - multiple snapshots, subvolumes, or different generations in one subvol |
299 | * - different files inside a single subvolume |
300 | * - different offsets inside a file (bookend extents in file.c) |
301 | * |
302 | * The extent ref structure for the implicit back refs has fields for: |
303 | * |
304 | * - Objectid of the subvolume root |
305 | * - objectid of the file holding the reference |
306 | * - original offset in the file |
307 | * - how many bookend extents |
308 | * |
309 | * The key offset for the implicit back refs is hash of the first |
310 | * three fields. |
311 | * |
 * The extent ref structure for the full back refs has a field for:
313 | * |
314 | * - number of pointers in the tree leaf |
315 | * |
 * The key offset for the full back refs is the first byte of
317 | * the tree leaf |
318 | * |
 * When a file extent is allocated, the implicit back refs is used.
 * The fields are filled in:
321 | * |
322 | * (root_key.objectid, inode objectid, offset in file, 1) |
323 | * |
 * When a file extent is removed by file truncation, we find the
325 | * corresponding implicit back refs and check the following fields: |
326 | * |
327 | * (btrfs_header_owner(leaf), inode objectid, offset in file) |
328 | * |
329 | * Btree extents can be referenced by: |
330 | * |
331 | * - Different subvolumes |
332 | * |
333 | * Both the implicit back refs and the full back refs for tree blocks |
334 | * only consist of key. The key offset for the implicit back refs is |
335 | * objectid of block's owner tree. The key offset for the full back refs |
336 | * is the first byte of parent block. |
337 | * |
338 | * When implicit back refs is used, information about the lowest key and |
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
341 | */ |
342 | |
343 | /* |
344 | * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required, |
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
346 | * is_data == BTRFS_REF_TYPE_ANY, either type is OK. |
347 | */ |
348 | int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, |
349 | struct btrfs_extent_inline_ref *iref, |
350 | enum btrfs_inline_ref_type is_data) |
351 | { |
352 | struct btrfs_fs_info *fs_info = eb->fs_info; |
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
355 | |
356 | if (type == BTRFS_EXTENT_OWNER_REF_KEY) { |
357 | ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)); |
358 | return type; |
359 | } |
360 | |
361 | if (type == BTRFS_TREE_BLOCK_REF_KEY || |
362 | type == BTRFS_SHARED_BLOCK_REF_KEY || |
363 | type == BTRFS_SHARED_DATA_REF_KEY || |
364 | type == BTRFS_EXTENT_DATA_REF_KEY) { |
365 | if (is_data == BTRFS_REF_TYPE_BLOCK) { |
366 | if (type == BTRFS_TREE_BLOCK_REF_KEY) |
367 | return type; |
368 | if (type == BTRFS_SHARED_BLOCK_REF_KEY) { |
369 | ASSERT(fs_info); |
370 | /* |
371 | * Every shared one has parent tree block, |
372 | * which must be aligned to sector size. |
373 | */ |
374 | if (offset && IS_ALIGNED(offset, fs_info->sectorsize)) |
375 | return type; |
376 | } |
377 | } else if (is_data == BTRFS_REF_TYPE_DATA) { |
378 | if (type == BTRFS_EXTENT_DATA_REF_KEY) |
379 | return type; |
380 | if (type == BTRFS_SHARED_DATA_REF_KEY) { |
381 | ASSERT(fs_info); |
382 | /* |
383 | * Every shared one has parent tree block, |
384 | * which must be aligned to sector size. |
385 | */ |
386 | if (offset && |
387 | IS_ALIGNED(offset, fs_info->sectorsize)) |
388 | return type; |
389 | } |
390 | } else { |
391 | ASSERT(is_data == BTRFS_REF_TYPE_ANY); |
392 | return type; |
393 | } |
394 | } |
395 | |
396 | WARN_ON(1); |
	btrfs_print_leaf(eb);
	btrfs_err(fs_info,
		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
400 | eb->start, (unsigned long)iref, type); |
401 | |
402 | return BTRFS_REF_TYPE_INVALID; |
403 | } |
404 | |
405 | u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) |
406 | { |
407 | u32 high_crc = ~(u32)0; |
408 | u32 low_crc = ~(u32)0; |
409 | __le64 lenum; |
410 | |
411 | lenum = cpu_to_le64(root_objectid); |
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
417 | |
418 | return ((u64)high_crc << 31) ^ (u64)low_crc; |
419 | } |
420 | |
421 | static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, |
422 | struct btrfs_extent_data_ref *ref) |
423 | { |
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
427 | } |
428 | |
429 | static int match_extent_data_ref(struct extent_buffer *leaf, |
430 | struct btrfs_extent_data_ref *ref, |
431 | u64 root_objectid, u64 owner, u64 offset) |
432 | { |
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
436 | return 0; |
437 | return 1; |
438 | } |
439 | |
440 | static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, |
441 | struct btrfs_path *path, |
442 | u64 bytenr, u64 parent, |
443 | u64 root_objectid, |
444 | u64 owner, u64 offset) |
445 | { |
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
447 | struct btrfs_key key; |
448 | struct btrfs_extent_data_ref *ref; |
449 | struct extent_buffer *leaf; |
450 | u32 nritems; |
451 | int ret; |
452 | int recow; |
453 | int err = -ENOENT; |
454 | |
455 | key.objectid = bytenr; |
456 | if (parent) { |
457 | key.type = BTRFS_SHARED_DATA_REF_KEY; |
458 | key.offset = parent; |
459 | } else { |
460 | key.type = BTRFS_EXTENT_DATA_REF_KEY; |
461 | key.offset = hash_extent_data_ref(root_objectid, |
462 | owner, offset); |
463 | } |
464 | again: |
465 | recow = 0; |
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
467 | if (ret < 0) { |
468 | err = ret; |
469 | goto fail; |
470 | } |
471 | |
472 | if (parent) { |
473 | if (!ret) |
474 | return 0; |
475 | goto fail; |
476 | } |
477 | |
478 | leaf = path->nodes[0]; |
	nritems = btrfs_header_nritems(leaf);
480 | while (1) { |
481 | if (path->slots[0] >= nritems) { |
482 | ret = btrfs_next_leaf(root, path); |
483 | if (ret < 0) |
484 | err = ret; |
485 | if (ret) |
486 | goto fail; |
487 | |
488 | leaf = path->nodes[0]; |
			nritems = btrfs_header_nritems(leaf);
490 | recow = 1; |
491 | } |
492 | |
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
494 | if (key.objectid != bytenr || |
495 | key.type != BTRFS_EXTENT_DATA_REF_KEY) |
496 | goto fail; |
497 | |
498 | ref = btrfs_item_ptr(leaf, path->slots[0], |
499 | struct btrfs_extent_data_ref); |
500 | |
501 | if (match_extent_data_ref(leaf, ref, root_objectid, |
502 | owner, offset)) { |
503 | if (recow) { |
				btrfs_release_path(path);
505 | goto again; |
506 | } |
507 | err = 0; |
508 | break; |
509 | } |
510 | path->slots[0]++; |
511 | } |
512 | fail: |
513 | return err; |
514 | } |
515 | |
516 | static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, |
517 | struct btrfs_path *path, |
518 | u64 bytenr, u64 parent, |
519 | u64 root_objectid, u64 owner, |
520 | u64 offset, int refs_to_add) |
521 | { |
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
523 | struct btrfs_key key; |
524 | struct extent_buffer *leaf; |
525 | u32 size; |
526 | u32 num_refs; |
527 | int ret; |
528 | |
529 | key.objectid = bytenr; |
530 | if (parent) { |
531 | key.type = BTRFS_SHARED_DATA_REF_KEY; |
532 | key.offset = parent; |
533 | size = sizeof(struct btrfs_shared_data_ref); |
534 | } else { |
535 | key.type = BTRFS_EXTENT_DATA_REF_KEY; |
536 | key.offset = hash_extent_data_ref(root_objectid, |
537 | owner, offset); |
538 | size = sizeof(struct btrfs_extent_data_ref); |
539 | } |
540 | |
	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
542 | if (ret && ret != -EEXIST) |
543 | goto fail; |
544 | |
545 | leaf = path->nodes[0]; |
546 | if (parent) { |
547 | struct btrfs_shared_data_ref *ref; |
548 | ref = btrfs_item_ptr(leaf, path->slots[0], |
549 | struct btrfs_shared_data_ref); |
550 | if (ret == 0) { |
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
556 | } |
557 | } else { |
558 | struct btrfs_extent_data_ref *ref; |
559 | while (ret == -EEXIST) { |
560 | ref = btrfs_item_ptr(leaf, path->slots[0], |
561 | struct btrfs_extent_data_ref); |
562 | if (match_extent_data_ref(leaf, ref, root_objectid, |
563 | owner, offset)) |
564 | break; |
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
569 | if (ret && ret != -EEXIST) |
570 | goto fail; |
571 | |
572 | leaf = path->nodes[0]; |
573 | } |
574 | ref = btrfs_item_ptr(leaf, path->slots[0], |
575 | struct btrfs_extent_data_ref); |
576 | if (ret == 0) { |
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
586 | } |
587 | } |
	btrfs_mark_buffer_dirty(trans, leaf);
589 | ret = 0; |
590 | fail: |
	btrfs_release_path(path);
592 | return ret; |
593 | } |
594 | |
595 | static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans, |
596 | struct btrfs_root *root, |
597 | struct btrfs_path *path, |
598 | int refs_to_drop) |
599 | { |
600 | struct btrfs_key key; |
601 | struct btrfs_extent_data_ref *ref1 = NULL; |
602 | struct btrfs_shared_data_ref *ref2 = NULL; |
603 | struct extent_buffer *leaf; |
604 | u32 num_refs = 0; |
605 | int ret = 0; |
606 | |
607 | leaf = path->nodes[0]; |
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
609 | |
610 | if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { |
611 | ref1 = btrfs_item_ptr(leaf, path->slots[0], |
612 | struct btrfs_extent_data_ref); |
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
614 | } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) { |
615 | ref2 = btrfs_item_ptr(leaf, path->slots[0], |
616 | struct btrfs_shared_data_ref); |
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
618 | } else { |
		btrfs_err(trans->fs_info,
			  "unrecognized backref key (%llu %u %llu)",
			  key.objectid, key.type, key.offset);
622 | btrfs_abort_transaction(trans, -EUCLEAN); |
623 | return -EUCLEAN; |
624 | } |
625 | |
626 | BUG_ON(num_refs < refs_to_drop); |
627 | num_refs -= refs_to_drop; |
628 | |
629 | if (num_refs == 0) { |
630 | ret = btrfs_del_item(trans, root, path); |
631 | } else { |
632 | if (key.type == BTRFS_EXTENT_DATA_REF_KEY) |
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(trans, leaf);
637 | } |
638 | return ret; |
639 | } |
640 | |
641 | static noinline u32 extent_data_ref_count(struct btrfs_path *path, |
642 | struct btrfs_extent_inline_ref *iref) |
643 | { |
644 | struct btrfs_key key; |
645 | struct extent_buffer *leaf; |
646 | struct btrfs_extent_data_ref *ref1; |
647 | struct btrfs_shared_data_ref *ref2; |
648 | u32 num_refs = 0; |
649 | int type; |
650 | |
651 | leaf = path->nodes[0]; |
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
653 | |
654 | if (iref) { |
655 | /* |
656 | * If type is invalid, we should have bailed out earlier than |
657 | * this call. |
658 | */ |
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
660 | ASSERT(type != BTRFS_REF_TYPE_INVALID); |
661 | if (type == BTRFS_EXTENT_DATA_REF_KEY) { |
662 | ref1 = (struct btrfs_extent_data_ref *)(&iref->offset); |
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
664 | } else { |
665 | ref2 = (struct btrfs_shared_data_ref *)(iref + 1); |
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
667 | } |
668 | } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { |
669 | ref1 = btrfs_item_ptr(leaf, path->slots[0], |
670 | struct btrfs_extent_data_ref); |
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
672 | } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) { |
673 | ref2 = btrfs_item_ptr(leaf, path->slots[0], |
674 | struct btrfs_shared_data_ref); |
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
676 | } else { |
677 | WARN_ON(1); |
678 | } |
679 | return num_refs; |
680 | } |
681 | |
682 | static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, |
683 | struct btrfs_path *path, |
684 | u64 bytenr, u64 parent, |
685 | u64 root_objectid) |
686 | { |
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
688 | struct btrfs_key key; |
689 | int ret; |
690 | |
691 | key.objectid = bytenr; |
692 | if (parent) { |
693 | key.type = BTRFS_SHARED_BLOCK_REF_KEY; |
694 | key.offset = parent; |
695 | } else { |
696 | key.type = BTRFS_TREE_BLOCK_REF_KEY; |
697 | key.offset = root_objectid; |
698 | } |
699 | |
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
701 | if (ret > 0) |
702 | ret = -ENOENT; |
703 | return ret; |
704 | } |
705 | |
706 | static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, |
707 | struct btrfs_path *path, |
708 | u64 bytenr, u64 parent, |
709 | u64 root_objectid) |
710 | { |
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
712 | struct btrfs_key key; |
713 | int ret; |
714 | |
715 | key.objectid = bytenr; |
716 | if (parent) { |
717 | key.type = BTRFS_SHARED_BLOCK_REF_KEY; |
718 | key.offset = parent; |
719 | } else { |
720 | key.type = BTRFS_TREE_BLOCK_REF_KEY; |
721 | key.offset = root_objectid; |
722 | } |
723 | |
	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
726 | return ret; |
727 | } |
728 | |
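/*
 * Pick the backref key type for an extent reference: metadata owners
 * (owner < BTRFS_FIRST_FREE_OBJECTID) get tree block ref types, data
 * owners get data ref types. A non-zero parent selects the shared (full)
 * variant, otherwise the indirect (implicit) variant is used.
 */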
729 | static inline int extent_ref_type(u64 parent, u64 owner) |
730 | { |
731 | int type; |
732 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { |
733 | if (parent > 0) |
734 | type = BTRFS_SHARED_BLOCK_REF_KEY; |
735 | else |
736 | type = BTRFS_TREE_BLOCK_REF_KEY; |
737 | } else { |
738 | if (parent > 0) |
739 | type = BTRFS_SHARED_DATA_REF_KEY; |
740 | else |
741 | type = BTRFS_EXTENT_DATA_REF_KEY; |
742 | } |
743 | return type; |
744 | } |
745 | |
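/*
 * Return in @key the key following the path's current position, walking up
 * the path's nodes when the current leaf or node is exhausted. Returns 0
 * if a next key exists and 1 if the path already points at the last key.
 */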
746 | static int find_next_key(struct btrfs_path *path, int level, |
			 struct btrfs_key *key)
{
750 | for (; level < BTRFS_MAX_LEVEL; level++) { |
751 | if (!path->nodes[level]) |
752 | break; |
753 | if (path->slots[level] + 1 >= |
		    btrfs_header_nritems(path->nodes[level]))
755 | continue; |
756 | if (level == 0) |
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
762 | return 0; |
763 | } |
764 | return 1; |
765 | } |
766 | |
767 | /* |
768 | * look for inline back ref. if back ref is found, *ref_ret is set |
769 | * to the address of inline back ref, and 0 is returned. |
770 | * |
771 | * if back ref isn't found, *ref_ret is set to the address where it |
772 | * should be inserted, and -ENOENT is returned. |
773 | * |
774 | * if insert is true and there are too many inline back refs, the path |
775 | * points to the extent item, and -EAGAIN is returned. |
776 | * |
777 | * NOTE: inline back refs are ordered in the same way that back ref |
778 | * items in the tree are ordered. |
779 | */ |
780 | static noinline_for_stack |
781 | int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, |
782 | struct btrfs_path *path, |
783 | struct btrfs_extent_inline_ref **ref_ret, |
784 | u64 bytenr, u64 num_bytes, |
785 | u64 parent, u64 root_objectid, |
786 | u64 owner, u64 offset, int insert) |
787 | { |
788 | struct btrfs_fs_info *fs_info = trans->fs_info; |
789 | struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr); |
790 | struct btrfs_key key; |
791 | struct extent_buffer *leaf; |
792 | struct btrfs_extent_item *ei; |
793 | struct btrfs_extent_inline_ref *iref; |
794 | u64 flags; |
795 | u64 item_size; |
796 | unsigned long ptr; |
797 | unsigned long end; |
	int extra_size;
799 | int type; |
800 | int want; |
801 | int ret; |
802 | bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); |
803 | int needed; |
804 | |
805 | key.objectid = bytenr; |
806 | key.type = BTRFS_EXTENT_ITEM_KEY; |
807 | key.offset = num_bytes; |
808 | |
809 | want = extent_ref_type(parent, owner); |
810 | if (insert) { |
		extra_size = btrfs_extent_inline_ref_size(want);
812 | path->search_for_extension = 1; |
813 | path->keep_locks = 1; |
814 | } else |
815 | extra_size = -1; |
816 | |
817 | /* |
818 | * Owner is our level, so we can just add one to get the level for the |
819 | * block we are interested in. |
820 | */ |
821 | if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) { |
822 | key.type = BTRFS_METADATA_ITEM_KEY; |
823 | key.offset = owner; |
824 | } |
825 | |
826 | again: |
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
828 | if (ret < 0) |
829 | goto out; |
830 | |
831 | /* |
832 | * We may be a newly converted file system which still has the old fat |
833 | * extent entries for metadata, so try and see if we have one of those. |
834 | */ |
835 | if (ret > 0 && skinny_metadata) { |
836 | skinny_metadata = false; |
837 | if (path->slots[0]) { |
838 | path->slots[0]--; |
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
841 | if (key.objectid == bytenr && |
842 | key.type == BTRFS_EXTENT_ITEM_KEY && |
843 | key.offset == num_bytes) |
844 | ret = 0; |
845 | } |
846 | if (ret) { |
847 | key.objectid = bytenr; |
848 | key.type = BTRFS_EXTENT_ITEM_KEY; |
849 | key.offset = num_bytes; |
			btrfs_release_path(path);
851 | goto again; |
852 | } |
853 | } |
854 | |
855 | if (ret && !insert) { |
856 | ret = -ENOENT; |
857 | goto out; |
858 | } else if (WARN_ON(ret)) { |
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
862 | bytenr, num_bytes, parent, root_objectid, owner, |
863 | offset); |
864 | ret = -EUCLEAN; |
865 | goto out; |
866 | } |
867 | |
868 | leaf = path->nodes[0]; |
	item_size = btrfs_item_size(leaf, path->slots[0]);
870 | if (unlikely(item_size < sizeof(*ei))) { |
871 | ret = -EUCLEAN; |
872 | btrfs_err(fs_info, |
873 | "unexpected extent item size, has %llu expect >= %zu" , |
874 | item_size, sizeof(*ei)); |
875 | btrfs_abort_transaction(trans, ret); |
876 | goto out; |
877 | } |
878 | |
879 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
	flags = btrfs_extent_flags(leaf, ei);
881 | |
882 | ptr = (unsigned long)(ei + 1); |
883 | end = (unsigned long)ei + item_size; |
884 | |
885 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) { |
886 | ptr += sizeof(struct btrfs_tree_block_info); |
887 | BUG_ON(ptr > end); |
888 | } |
889 | |
890 | if (owner >= BTRFS_FIRST_FREE_OBJECTID) |
891 | needed = BTRFS_REF_TYPE_DATA; |
892 | else |
893 | needed = BTRFS_REF_TYPE_BLOCK; |
894 | |
895 | ret = -ENOENT; |
896 | while (ptr < end) { |
897 | iref = (struct btrfs_extent_inline_ref *)ptr; |
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
899 | if (type == BTRFS_EXTENT_OWNER_REF_KEY) { |
900 | ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)); |
901 | ptr += btrfs_extent_inline_ref_size(type); |
902 | continue; |
903 | } |
904 | if (type == BTRFS_REF_TYPE_INVALID) { |
905 | ret = -EUCLEAN; |
906 | goto out; |
907 | } |
908 | |
909 | if (want < type) |
910 | break; |
911 | if (want > type) { |
912 | ptr += btrfs_extent_inline_ref_size(type); |
913 | continue; |
914 | } |
915 | |
916 | if (type == BTRFS_EXTENT_DATA_REF_KEY) { |
917 | struct btrfs_extent_data_ref *dref; |
918 | dref = (struct btrfs_extent_data_ref *)(&iref->offset); |
			if (match_extent_data_ref(leaf, dref, root_objectid,
920 | owner, offset)) { |
921 | ret = 0; |
922 | break; |
923 | } |
			if (hash_extent_data_ref_item(leaf, dref) <
925 | hash_extent_data_ref(root_objectid, owner, offset)) |
926 | break; |
927 | } else { |
928 | u64 ref_offset; |
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
930 | if (parent > 0) { |
931 | if (parent == ref_offset) { |
932 | ret = 0; |
933 | break; |
934 | } |
935 | if (ref_offset < parent) |
936 | break; |
937 | } else { |
938 | if (root_objectid == ref_offset) { |
939 | ret = 0; |
940 | break; |
941 | } |
942 | if (ref_offset < root_objectid) |
943 | break; |
944 | } |
945 | } |
946 | ptr += btrfs_extent_inline_ref_size(type); |
947 | } |
948 | |
949 | if (unlikely(ptr > end)) { |
950 | ret = -EUCLEAN; |
		btrfs_print_leaf(path->nodes[0]);
		btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
954 | path->slots[0], root_objectid, owner, offset, parent); |
955 | goto out; |
956 | } |
957 | |
958 | if (ret == -ENOENT && insert) { |
959 | if (item_size + extra_size >= |
960 | BTRFS_MAX_EXTENT_ITEM_SIZE(root)) { |
961 | ret = -EAGAIN; |
962 | goto out; |
963 | } |
964 | /* |
965 | * To add new inline back ref, we have to make sure |
966 | * there is no corresponding back ref item. |
967 | * For simplicity, we just do not add new inline back |
968 | * ref if there is any kind of item for this block |
969 | */ |
		if (find_next_key(path, 0, &key) == 0 &&
971 | key.objectid == bytenr && |
972 | key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { |
973 | ret = -EAGAIN; |
974 | goto out; |
975 | } |
976 | } |
977 | *ref_ret = (struct btrfs_extent_inline_ref *)ptr; |
978 | out: |
979 | if (insert) { |
980 | path->keep_locks = 0; |
981 | path->search_for_extension = 0; |
		btrfs_unlock_up_safe(path, 1);
983 | } |
984 | return ret; |
985 | } |
986 | |
987 | /* |
988 | * helper to add new inline back ref |
989 | */ |
990 | static noinline_for_stack |
991 | void setup_inline_extent_backref(struct btrfs_trans_handle *trans, |
992 | struct btrfs_path *path, |
993 | struct btrfs_extent_inline_ref *iref, |
994 | u64 parent, u64 root_objectid, |
995 | u64 owner, u64 offset, int refs_to_add, |
996 | struct btrfs_delayed_extent_op *extent_op) |
997 | { |
998 | struct extent_buffer *leaf; |
999 | struct btrfs_extent_item *ei; |
1000 | unsigned long ptr; |
1001 | unsigned long end; |
1002 | unsigned long item_offset; |
1003 | u64 refs; |
1004 | int size; |
1005 | int type; |
1006 | |
1007 | leaf = path->nodes[0]; |
1008 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
1009 | item_offset = (unsigned long)iref - (unsigned long)ei; |
1010 | |
1011 | type = extent_ref_type(parent, owner); |
1012 | size = btrfs_extent_inline_ref_size(type); |
1013 | |
	btrfs_extend_item(trans, path, size);
1015 | |
1016 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
1020 | if (extent_op) |
1021 | __run_delayed_extent_op(extent_op, leaf, ei); |
1022 | |
1023 | ptr = (unsigned long)ei + item_offset; |
	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);
1028 | |
1029 | iref = (struct btrfs_extent_inline_ref *)ptr; |
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1031 | if (type == BTRFS_EXTENT_DATA_REF_KEY) { |
1032 | struct btrfs_extent_data_ref *dref; |
1033 | dref = (struct btrfs_extent_data_ref *)(&iref->offset); |
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1038 | } else if (type == BTRFS_SHARED_DATA_REF_KEY) { |
1039 | struct btrfs_shared_data_ref *sref; |
1040 | sref = (struct btrfs_shared_data_ref *)(iref + 1); |
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1047 | } |
	btrfs_mark_buffer_dirty(trans, leaf);
1049 | } |
1050 | |
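/*
 * Look up a backref for the given extent, first as an inline ref and,
 * failing that, as a keyed ref item. *ref_ret is only valid for the
 * inline case; for keyed refs the path points at the ref item instead.
 */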
1051 | static int lookup_extent_backref(struct btrfs_trans_handle *trans, |
1052 | struct btrfs_path *path, |
1053 | struct btrfs_extent_inline_ref **ref_ret, |
1054 | u64 bytenr, u64 num_bytes, u64 parent, |
1055 | u64 root_objectid, u64 owner, u64 offset) |
1056 | { |
1057 | int ret; |
1058 | |
1059 | ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr, |
1060 | num_bytes, parent, root_objectid, |
					   owner, offset, 0);
1062 | if (ret != -ENOENT) |
1063 | return ret; |
1064 | |
	btrfs_release_path(path);
1066 | *ref_ret = NULL; |
1067 | |
1068 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { |
1069 | ret = lookup_tree_block_ref(trans, path, bytenr, parent, |
1070 | root_objectid); |
1071 | } else { |
1072 | ret = lookup_extent_data_ref(trans, path, bytenr, parent, |
1073 | root_objectid, owner, offset); |
1074 | } |
1075 | return ret; |
1076 | } |
1077 | |
1078 | /* |
1079 | * helper to update/remove inline back ref |
1080 | */ |
1081 | static noinline_for_stack int update_inline_extent_backref( |
1082 | struct btrfs_trans_handle *trans, |
1083 | struct btrfs_path *path, |
1084 | struct btrfs_extent_inline_ref *iref, |
1085 | int refs_to_mod, |
1086 | struct btrfs_delayed_extent_op *extent_op) |
1087 | { |
1088 | struct extent_buffer *leaf = path->nodes[0]; |
1089 | struct btrfs_fs_info *fs_info = leaf->fs_info; |
1090 | struct btrfs_extent_item *ei; |
1091 | struct btrfs_extent_data_ref *dref = NULL; |
1092 | struct btrfs_shared_data_ref *sref = NULL; |
1093 | unsigned long ptr; |
1094 | unsigned long end; |
1095 | u32 item_size; |
1096 | int size; |
1097 | int type; |
1098 | u64 refs; |
1099 | |
1100 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
	refs = btrfs_extent_refs(leaf, ei);
1102 | if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) { |
1103 | struct btrfs_key key; |
1104 | u32 extent_size; |
1105 | |
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1107 | if (key.type == BTRFS_METADATA_ITEM_KEY) |
1108 | extent_size = fs_info->nodesize; |
1109 | else |
1110 | extent_size = key.offset; |
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
		"invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
1114 | key.objectid, extent_size, refs_to_mod, refs); |
1115 | return -EUCLEAN; |
1116 | } |
1117 | refs += refs_to_mod; |
	btrfs_set_extent_refs(leaf, ei, refs);
1119 | if (extent_op) |
1120 | __run_delayed_extent_op(extent_op, leaf, ei); |
1121 | |
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
1123 | /* |
1124 | * Function btrfs_get_extent_inline_ref_type() has already printed |
1125 | * error messages. |
1126 | */ |
1127 | if (unlikely(type == BTRFS_REF_TYPE_INVALID)) |
1128 | return -EUCLEAN; |
1129 | |
1130 | if (type == BTRFS_EXTENT_DATA_REF_KEY) { |
1131 | dref = (struct btrfs_extent_data_ref *)(&iref->offset); |
		refs = btrfs_extent_data_ref_count(leaf, dref);
1133 | } else if (type == BTRFS_SHARED_DATA_REF_KEY) { |
1134 | sref = (struct btrfs_shared_data_ref *)(iref + 1); |
		refs = btrfs_shared_data_ref_count(leaf, sref);
1136 | } else { |
1137 | refs = 1; |
1138 | /* |
1139 | * For tree blocks we can only drop one ref for it, and tree |
1140 | * blocks should not have refs > 1. |
1141 | * |
1142 | * Furthermore if we're inserting a new inline backref, we |
1143 | * won't reach this path either. That would be |
1144 | * setup_inline_extent_backref(). |
1145 | */ |
1146 | if (unlikely(refs_to_mod != -1)) { |
1147 | struct btrfs_key key; |
1148 | |
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

			btrfs_print_leaf(leaf);
			btrfs_err(fs_info,
	"invalid refs_to_mod for tree block %llu, has %d expect -1",
1154 | key.objectid, refs_to_mod); |
1155 | return -EUCLEAN; |
1156 | } |
1157 | } |
1158 | |
1159 | if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) { |
1160 | struct btrfs_key key; |
1161 | u32 extent_size; |
1162 | |
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1164 | if (key.type == BTRFS_METADATA_ITEM_KEY) |
1165 | extent_size = fs_info->nodesize; |
1166 | else |
1167 | extent_size = key.offset; |
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
1171 | (unsigned long)iref, key.objectid, extent_size, |
1172 | refs_to_mod, refs); |
1173 | return -EUCLEAN; |
1174 | } |
1175 | refs += refs_to_mod; |
1176 | |
1177 | if (refs > 0) { |
1178 | if (type == BTRFS_EXTENT_DATA_REF_KEY) |
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1182 | } else { |
1183 | size = btrfs_extent_inline_ref_size(type); |
		item_size = btrfs_item_size(leaf, path->slots[0]);
1185 | ptr = (unsigned long)iref; |
1186 | end = (unsigned long)ei + item_size; |
1187 | if (ptr + size < end) |
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(trans, path, item_size, 1);
1192 | } |
	btrfs_mark_buffer_dirty(trans, leaf);
1194 | return 0; |
1195 | } |
1196 | |
1197 | static noinline_for_stack |
1198 | int insert_inline_extent_backref(struct btrfs_trans_handle *trans, |
1199 | struct btrfs_path *path, |
1200 | u64 bytenr, u64 num_bytes, u64 parent, |
1201 | u64 root_objectid, u64 owner, |
1202 | u64 offset, int refs_to_add, |
1203 | struct btrfs_delayed_extent_op *extent_op) |
1204 | { |
1205 | struct btrfs_extent_inline_ref *iref; |
1206 | int ret; |
1207 | |
	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
1211 | if (ret == 0) { |
1212 | /* |
1213 | * We're adding refs to a tree block we already own, this |
1214 | * should not happen at all. |
1215 | */ |
1216 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { |
			btrfs_print_leaf(path->nodes[0]);
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
1220 | bytenr, num_bytes, root_objectid, path->slots[0]); |
1221 | return -EUCLEAN; |
1222 | } |
1223 | ret = update_inline_extent_backref(trans, path, iref, |
						   refs_to_add, extent_op);
1225 | } else if (ret == -ENOENT) { |
1226 | setup_inline_extent_backref(trans, path, iref, parent, |
1227 | root_objectid, owner, offset, |
1228 | refs_to_add, extent_op); |
1229 | ret = 0; |
1230 | } |
1231 | return ret; |
1232 | } |
1233 | |
1234 | static int remove_extent_backref(struct btrfs_trans_handle *trans, |
1235 | struct btrfs_root *root, |
1236 | struct btrfs_path *path, |
1237 | struct btrfs_extent_inline_ref *iref, |
1238 | int refs_to_drop, int is_data) |
1239 | { |
1240 | int ret = 0; |
1241 | |
1242 | BUG_ON(!is_data && refs_to_drop != 1); |
1243 | if (iref) |
1244 | ret = update_inline_extent_backref(trans, path, iref, |
						   -refs_to_drop, NULL);
1246 | else if (is_data) |
1247 | ret = remove_extent_data_ref(trans, root, path, refs_to_drop); |
1248 | else |
1249 | ret = btrfs_del_item(trans, root, path); |
1250 | return ret; |
1251 | } |
1252 | |
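/*
 * Discard [start, start + len) on @bdev, skipping any btrfs superblock
 * copies inside the range, and return the number of bytes actually
 * discarded in @discarded_bytes. As an illustration (offsets assumed from
 * the usual superblock layout), discarding [0, 1 MiB) would trim the whole
 * range except the BTRFS_SUPER_INFO_SIZE bytes at btrfs_sb_offset(0),
 * 64 KiB into the device.
 */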
1253 | static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len, |
1254 | u64 *discarded_bytes) |
1255 | { |
1256 | int j, ret = 0; |
1257 | u64 bytes_left, end; |
1258 | u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT); |
1259 | |
1260 | /* Adjust the range to be aligned to 512B sectors if necessary. */ |
1261 | if (start != aligned_start) { |
1262 | len -= aligned_start - start; |
1263 | len = round_down(len, 1 << SECTOR_SHIFT); |
1264 | start = aligned_start; |
1265 | } |
1266 | |
1267 | *discarded_bytes = 0; |
1268 | |
1269 | if (!len) |
1270 | return 0; |
1271 | |
1272 | end = start + len; |
1273 | bytes_left = len; |
1274 | |
1275 | /* Skip any superblocks on this device. */ |
1276 | for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) { |
		u64 sb_start = btrfs_sb_offset(j);
1278 | u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE; |
1279 | u64 size = sb_start - start; |
1280 | |
1281 | if (!in_range(sb_start, start, bytes_left) && |
1282 | !in_range(sb_end, start, bytes_left) && |
1283 | !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE)) |
1284 | continue; |
1285 | |
1286 | /* |
1287 | * Superblock spans beginning of range. Adjust start and |
1288 | * try again. |
1289 | */ |
1290 | if (sb_start <= start) { |
1291 | start += sb_end - start; |
1292 | if (start > end) { |
1293 | bytes_left = 0; |
1294 | break; |
1295 | } |
1296 | bytes_left = end - start; |
1297 | continue; |
1298 | } |
1299 | |
1300 | if (size) { |
			ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
						   size >> SECTOR_SHIFT,
						   GFP_NOFS);
1304 | if (!ret) |
1305 | *discarded_bytes += size; |
1306 | else if (ret != -EOPNOTSUPP) |
1307 | return ret; |
1308 | } |
1309 | |
1310 | start = sb_end; |
1311 | if (start > end) { |
1312 | bytes_left = 0; |
1313 | break; |
1314 | } |
1315 | bytes_left = end - start; |
1316 | } |
1317 | |
1318 | if (bytes_left) { |
		ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					   bytes_left >> SECTOR_SHIFT,
					   GFP_NOFS);
1322 | if (!ret) |
1323 | *discarded_bytes += bytes_left; |
1324 | } |
1325 | return ret; |
1326 | } |
1327 | |
1328 | static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes) |
1329 | { |
1330 | struct btrfs_device *dev = stripe->dev; |
1331 | struct btrfs_fs_info *fs_info = dev->fs_info; |
1332 | struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; |
1333 | u64 phys = stripe->physical; |
1334 | u64 len = stripe->length; |
1335 | u64 discarded = 0; |
1336 | int ret = 0; |
1337 | |
1338 | /* Zone reset on a zoned filesystem */ |
	if (btrfs_can_zone_reset(dev, phys, len)) {
1340 | u64 src_disc; |
1341 | |
		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
1343 | if (ret) |
1344 | goto out; |
1345 | |
1346 | if (!btrfs_dev_replace_is_ongoing(dev_replace) || |
1347 | dev != dev_replace->srcdev) |
1348 | goto out; |
1349 | |
1350 | src_disc = discarded; |
1351 | |
1352 | /* Send to replace target as well */ |
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
1355 | discarded += src_disc; |
	} else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
1358 | } else { |
1359 | ret = 0; |
1360 | *bytes = 0; |
1361 | } |
1362 | |
1363 | out: |
1364 | *bytes = discarded; |
1365 | return ret; |
1366 | } |
1367 | |
1368 | int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, |
1369 | u64 num_bytes, u64 *actual_bytes) |
1370 | { |
1371 | int ret = 0; |
1372 | u64 discarded_bytes = 0; |
1373 | u64 end = bytenr + num_bytes; |
1374 | u64 cur = bytenr; |
1375 | |
1376 | /* |
1377 | * Avoid races with device replace and make sure the devices in the |
1378 | * stripes don't go away while we are discarding. |
1379 | */ |
1380 | btrfs_bio_counter_inc_blocked(fs_info); |
1381 | while (cur < end) { |
1382 | struct btrfs_discard_stripe *stripes; |
1383 | unsigned int num_stripes; |
1384 | int i; |
1385 | |
1386 | num_bytes = end - cur; |
		stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
		if (IS_ERR(stripes)) {
			ret = PTR_ERR(stripes);
1390 | if (ret == -EOPNOTSUPP) |
1391 | ret = 0; |
1392 | break; |
1393 | } |
1394 | |
1395 | for (i = 0; i < num_stripes; i++) { |
1396 | struct btrfs_discard_stripe *stripe = stripes + i; |
1397 | u64 bytes; |
1398 | |
1399 | if (!stripe->dev->bdev) { |
1400 | ASSERT(btrfs_test_opt(fs_info, DEGRADED)); |
1401 | continue; |
1402 | } |
1403 | |
1404 | if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, |
1405 | &stripe->dev->dev_state)) |
1406 | continue; |
1407 | |
			ret = do_discard_extent(stripe, &bytes);
1409 | if (ret) { |
1410 | /* |
1411 | * Keep going if discard is not supported by the |
1412 | * device. |
1413 | */ |
1414 | if (ret != -EOPNOTSUPP) |
1415 | break; |
1416 | ret = 0; |
1417 | } else { |
1418 | discarded_bytes += bytes; |
1419 | } |
1420 | } |
		kfree(stripes);
1422 | if (ret) |
1423 | break; |
1424 | cur += num_bytes; |
1425 | } |
1426 | btrfs_bio_counter_dec(fs_info); |
1427 | if (actual_bytes) |
1428 | *actual_bytes = discarded_bytes; |
1429 | return ret; |
1430 | } |
1431 | |
1432 | /* Can return -ENOMEM */ |
1433 | int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, |
1434 | struct btrfs_ref *generic_ref) |
1435 | { |
1436 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1437 | int ret; |
1438 | |
1439 | ASSERT(generic_ref->type != BTRFS_REF_NOT_SET && |
1440 | generic_ref->action); |
1441 | BUG_ON(generic_ref->type == BTRFS_REF_METADATA && |
1442 | generic_ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID); |
1443 | |
1444 | if (generic_ref->type == BTRFS_REF_METADATA) |
1445 | ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL); |
1446 | else |
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);
1448 | |
1449 | btrfs_ref_tree_mod(fs_info, generic_ref); |
1450 | |
1451 | return ret; |
1452 | } |
1453 | |
1454 | /* |
1455 | * Insert backreference for a given extent. |
1456 | * |
1457 | * The counterpart is in __btrfs_free_extent(), with examples and more details |
 * on how it works.
1459 | * |
1460 | * @trans: Handle of transaction |
1461 | * |
1462 | * @node: The delayed ref node used to get the bytenr/length for |
1463 | * extent whose references are incremented. |
1464 | * |
1465 | * @parent: If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/ |
1466 | * BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical |
1467 | * bytenr of the parent block. Since new extents are always |
1468 | * created with indirect references, this will only be the case |
1469 | * when relocating a shared extent. In that case, root_objectid |
1470 | * will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must |
1471 | * be 0 |
1472 | * |
1473 | * @root_objectid: The id of the root where this modification has originated, |
1474 | * this can be either one of the well-known metadata trees or |
1475 | * the subvolume id which references this extent. |
1476 | * |
1477 | * @owner: For data extents it is the inode number of the owning file. |
1478 | * For metadata extents this parameter holds the level in the |
1479 | * tree of the extent. |
1480 | * |
1481 | * @offset: For metadata extents the offset is ignored and is currently |
1482 | * always passed as 0. For data extents it is the fileoffset |
1483 | * this extent belongs to. |
1484 | * |
 * @extent_op:     Pointer to a structure, holding information necessary when
1486 | * updating a tree block's flags |
1487 | * |
1488 | */ |
1489 | static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, |
1490 | struct btrfs_delayed_ref_node *node, |
1491 | u64 parent, u64 root_objectid, |
1492 | u64 owner, u64 offset, |
1493 | struct btrfs_delayed_extent_op *extent_op) |
1494 | { |
1495 | struct btrfs_path *path; |
1496 | struct extent_buffer *leaf; |
1497 | struct btrfs_extent_item *item; |
1498 | struct btrfs_key key; |
1499 | u64 bytenr = node->bytenr; |
1500 | u64 num_bytes = node->num_bytes; |
1501 | u64 refs; |
1502 | int refs_to_add = node->ref_mod; |
1503 | int ret; |
1504 | |
1505 | path = btrfs_alloc_path(); |
1506 | if (!path) |
1507 | return -ENOMEM; |
1508 | |
1509 | /* this will setup the path even if it fails to insert the back ref */ |
1510 | ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes, |
1511 | parent, root_objectid, owner, |
1512 | offset, refs_to_add, extent_op); |
1513 | if ((ret < 0 && ret != -EAGAIN) || !ret) |
1514 | goto out; |
1515 | |
1516 | /* |
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
1519 | * normal backref. |
1520 | */ |
1521 | leaf = path->nodes[0]; |
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1526 | if (extent_op) |
		__run_delayed_extent_op(extent_op, leaf, item);
1528 | |
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);
1531 | |
1532 | /* now insert the actual backref */ |
1533 | if (owner < BTRFS_FIRST_FREE_OBJECTID) |
1534 | ret = insert_tree_block_ref(trans, path, bytenr, parent, |
1535 | root_objectid); |
1536 | else |
1537 | ret = insert_extent_data_ref(trans, path, bytenr, parent, |
1538 | root_objectid, owner, offset, |
1539 | refs_to_add); |
1540 | |
1541 | if (ret) |
1542 | btrfs_abort_transaction(trans, ret); |
1543 | out: |
	btrfs_free_path(path);
1545 | return ret; |
1546 | } |
1547 | |
1548 | static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info, |
1549 | struct btrfs_delayed_ref_head *href) |
1550 | { |
1551 | u64 root = href->owning_root; |
1552 | |
1553 | /* |
1554 | * Don't check must_insert_reserved, as this is called from contexts |
1555 | * where it has already been unset. |
1556 | */ |
1557 | if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE || |
	    !href->is_data || !is_fstree(root))
1559 | return; |
1560 | |
	btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes,
				  BTRFS_QGROUP_RSV_DATA);
1563 | } |
1564 | |
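/*
 * Apply a single delayed data ref to the extent tree: an ADD with
 * must-insert-reserved set creates the extent item for a freshly allocated
 * extent, a plain ADD increments the reference count, and a DROP frees
 * one reference via __btrfs_free_extent().
 */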
1565 | static int run_delayed_data_ref(struct btrfs_trans_handle *trans, |
1566 | struct btrfs_delayed_ref_head *href, |
1567 | struct btrfs_delayed_ref_node *node, |
1568 | struct btrfs_delayed_extent_op *extent_op, |
1569 | bool insert_reserved) |
1570 | { |
1571 | int ret = 0; |
1572 | struct btrfs_delayed_data_ref *ref; |
1573 | u64 parent = 0; |
1574 | u64 flags = 0; |
1575 | |
1576 | ref = btrfs_delayed_node_to_data_ref(node); |
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);
1578 | |
1579 | if (node->type == BTRFS_SHARED_DATA_REF_KEY) |
1580 | parent = ref->parent; |
1581 | |
1582 | if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { |
1583 | struct btrfs_key key; |
1584 | struct btrfs_squota_delta delta = { |
1585 | .root = href->owning_root, |
1586 | .num_bytes = node->num_bytes, |
1587 | .is_data = true, |
1588 | .is_inc = true, |
1589 | .generation = trans->transid, |
1590 | }; |
1591 | |
1592 | if (extent_op) |
1593 | flags |= extent_op->flags_to_set; |
1594 | |
1595 | key.objectid = node->bytenr; |
1596 | key.type = BTRFS_EXTENT_ITEM_KEY; |
1597 | key.offset = node->num_bytes; |
1598 | |
		ret = alloc_reserved_file_extent(trans, parent, ref->root,
						 flags, ref->objectid,
						 ref->offset, &key,
						 node->ref_mod, href->owning_root);
		free_head_ref_squota_rsv(trans->fs_info, href);
1604 | if (!ret) |
1605 | ret = btrfs_record_squota_delta(fs_info: trans->fs_info, delta: &delta); |
1606 | } else if (node->action == BTRFS_ADD_DELAYED_REF) { |
1607 | ret = __btrfs_inc_extent_ref(trans, node, parent, root_objectid: ref->root, |
1608 | owner: ref->objectid, offset: ref->offset, |
1609 | extent_op); |
1610 | } else if (node->action == BTRFS_DROP_DELAYED_REF) { |
1611 | ret = __btrfs_free_extent(trans, href, node, parent, |
1612 | root_objectid: ref->root, owner_objectid: ref->objectid, |
1613 | owner_offset: ref->offset, extra_op: extent_op); |
1614 | } else { |
1615 | BUG(); |
1616 | } |
1617 | return ret; |
1618 | } |
1619 | |
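/*
 * Apply a pending extent op to an extent item in place: update the extent
 * flags and/or, for tree blocks, the first key recorded in the tree block
 * info that follows the item.
 */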
1620 | static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, |
1621 | struct extent_buffer *leaf, |
1622 | struct btrfs_extent_item *ei) |
1623 | { |
1624 | u64 flags = btrfs_extent_flags(eb: leaf, s: ei); |
1625 | if (extent_op->update_flags) { |
1626 | flags |= extent_op->flags_to_set; |
1627 | btrfs_set_extent_flags(eb: leaf, s: ei, val: flags); |
1628 | } |
1629 | |
1630 | if (extent_op->update_key) { |
1631 | struct btrfs_tree_block_info *bi; |
1632 | BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)); |
1633 | bi = (struct btrfs_tree_block_info *)(ei + 1); |
1634 | btrfs_set_tree_block_key(eb: leaf, item: bi, key: &extent_op->key); |
1635 | } |
1636 | } |
1637 | |
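/*
 * Locate the EXTENT_ITEM or METADATA_ITEM for a delayed ref head and apply
 * the pending extent op to it. With the skinny metadata feature we look
 * for the METADATA_ITEM first and fall back to the old style EXTENT_ITEM.
 */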
1638 | static int run_delayed_extent_op(struct btrfs_trans_handle *trans, |
1639 | struct btrfs_delayed_ref_head *head, |
1640 | struct btrfs_delayed_extent_op *extent_op) |
1641 | { |
1642 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1643 | struct btrfs_root *root; |
1644 | struct btrfs_key key; |
1645 | struct btrfs_path *path; |
1646 | struct btrfs_extent_item *ei; |
1647 | struct extent_buffer *leaf; |
1648 | u32 item_size; |
1649 | int ret; |
1650 | int metadata = 1; |
1651 | |
1652 | if (TRANS_ABORTED(trans)) |
1653 | return 0; |
1654 | |
1655 | if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA)) |
1656 | metadata = 0; |
1657 | |
1658 | path = btrfs_alloc_path(); |
1659 | if (!path) |
1660 | return -ENOMEM; |
1661 | |
1662 | key.objectid = head->bytenr; |
1663 | |
1664 | if (metadata) { |
1665 | key.type = BTRFS_METADATA_ITEM_KEY; |
1666 | key.offset = extent_op->level; |
1667 | } else { |
1668 | key.type = BTRFS_EXTENT_ITEM_KEY; |
1669 | key.offset = head->num_bytes; |
1670 | } |
1671 | |
1672 | root = btrfs_extent_root(fs_info, bytenr: key.objectid); |
1673 | again: |
1674 | ret = btrfs_search_slot(trans, root, key: &key, p: path, ins_len: 0, cow: 1); |
1675 | if (ret < 0) { |
1676 | goto out; |
1677 | } else if (ret > 0) { |
1678 | if (metadata) { |
1679 | if (path->slots[0] > 0) { |
1680 | path->slots[0]--; |
1681 | btrfs_item_key_to_cpu(eb: path->nodes[0], cpu_key: &key, |
1682 | nr: path->slots[0]); |
1683 | if (key.objectid == head->bytenr && |
1684 | key.type == BTRFS_EXTENT_ITEM_KEY && |
1685 | key.offset == head->num_bytes) |
1686 | ret = 0; |
1687 | } |
1688 | if (ret > 0) { |
1689 | btrfs_release_path(p: path); |
1690 | metadata = 0; |
1691 | |
1692 | key.objectid = head->bytenr; |
1693 | key.offset = head->num_bytes; |
1694 | key.type = BTRFS_EXTENT_ITEM_KEY; |
1695 | goto again; |
1696 | } |
1697 | } else { |
1698 | ret = -EUCLEAN; |
1699 | btrfs_err(fs_info, |
1700 | "missing extent item for extent %llu num_bytes %llu level %d", |
1701 | head->bytenr, head->num_bytes, extent_op->level); |
1702 | goto out; |
1703 | } |
1704 | } |
1705 | |
1706 | leaf = path->nodes[0]; |
1707 | item_size = btrfs_item_size(eb: leaf, slot: path->slots[0]); |
1708 | |
1709 | if (unlikely(item_size < sizeof(*ei))) { |
1710 | ret = -EUCLEAN; |
1711 | btrfs_err(fs_info, |
1712 | "unexpected extent item size, has %u expect >= %zu", |
1713 | item_size, sizeof(*ei)); |
1714 | btrfs_abort_transaction(trans, ret); |
1715 | goto out; |
1716 | } |
1717 | |
1718 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
1719 | __run_delayed_extent_op(extent_op, leaf, ei); |
1720 | |
1721 | btrfs_mark_buffer_dirty(trans, buf: leaf); |
1722 | out: |
1723 | btrfs_free_path(p: path); |
1724 | return ret; |
1725 | } |
1726 | |
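/*
 * Process a single delayed tree block ref. Tree blocks always carry
 * exactly one reference per ref node, so any other ref_mod indicates
 * corruption.
 */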
1727 | static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, |
1728 | struct btrfs_delayed_ref_head *href, |
1729 | struct btrfs_delayed_ref_node *node, |
1730 | struct btrfs_delayed_extent_op *extent_op, |
1731 | bool insert_reserved) |
1732 | { |
1733 | int ret = 0; |
1734 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1735 | struct btrfs_delayed_tree_ref *ref; |
1736 | u64 parent = 0; |
1737 | u64 ref_root = 0; |
1738 | |
1739 | ref = btrfs_delayed_node_to_tree_ref(node); |
1740 | trace_run_delayed_tree_ref(fs_info: trans->fs_info, ref: node, full_ref: ref, action: node->action); |
1741 | |
1742 | if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) |
1743 | parent = ref->parent; |
1744 | ref_root = ref->root; |
1745 | |
1746 | if (unlikely(node->ref_mod != 1)) { |
1747 | btrfs_err(trans->fs_info, |
1748 | "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu", |
1749 | node->bytenr, node->ref_mod, node->action, ref_root, |
1750 | parent); |
1751 | return -EUCLEAN; |
1752 | } |
1753 | if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { |
1754 | struct btrfs_squota_delta delta = { |
1755 | .root = href->owning_root, |
1756 | .num_bytes = fs_info->nodesize, |
1757 | .is_data = false, |
1758 | .is_inc = true, |
1759 | .generation = trans->transid, |
1760 | }; |
1761 | |
1762 | BUG_ON(!extent_op || !extent_op->update_flags); |
1763 | ret = alloc_reserved_tree_block(trans, node, extent_op); |
1764 | if (!ret) |
1765 | btrfs_record_squota_delta(fs_info, delta: &delta); |
1766 | } else if (node->action == BTRFS_ADD_DELAYED_REF) { |
1767 | ret = __btrfs_inc_extent_ref(trans, node, parent, root_objectid: ref_root, |
1768 | owner: ref->level, offset: 0, extent_op); |
1769 | } else if (node->action == BTRFS_DROP_DELAYED_REF) { |
1770 | ret = __btrfs_free_extent(trans, href, node, parent, root_objectid: ref_root, |
1771 | owner_objectid: ref->level, owner_offset: 0, extra_op: extent_op); |
1772 | } else { |
1773 | BUG(); |
1774 | } |
1775 | return ret; |
1776 | } |
1777 | |
1778 | /* helper function to actually process a single delayed ref entry */ |
1779 | static int run_one_delayed_ref(struct btrfs_trans_handle *trans, |
1780 | struct btrfs_delayed_ref_head *href, |
1781 | struct btrfs_delayed_ref_node *node, |
1782 | struct btrfs_delayed_extent_op *extent_op, |
1783 | bool insert_reserved) |
1784 | { |
1785 | int ret = 0; |
1786 | |
1787 | if (TRANS_ABORTED(trans)) { |
1788 | if (insert_reserved) { |
1789 | btrfs_pin_extent(trans, bytenr: node->bytenr, num: node->num_bytes, reserved: 1); |
1790 | free_head_ref_squota_rsv(fs_info: trans->fs_info, href); |
1791 | } |
1792 | return 0; |
1793 | } |
1794 | |
1795 | if (node->type == BTRFS_TREE_BLOCK_REF_KEY || |
1796 | node->type == BTRFS_SHARED_BLOCK_REF_KEY) |
1797 | ret = run_delayed_tree_ref(trans, href, node, extent_op, |
1798 | insert_reserved); |
1799 | else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || |
1800 | node->type == BTRFS_SHARED_DATA_REF_KEY) |
1801 | ret = run_delayed_data_ref(trans, href, node, extent_op, |
1802 | insert_reserved); |
1803 | else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY) |
1804 | ret = 0; |
1805 | else |
1806 | BUG(); |
1807 | if (ret && insert_reserved) |
1808 | btrfs_pin_extent(trans, bytenr: node->bytenr, num: node->num_bytes, reserved: 1); |
1809 | if (ret < 0) |
1810 | btrfs_err(trans->fs_info, |
1811 | "failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d", |
1812 | node->bytenr, node->num_bytes, node->type, |
1813 | node->action, node->ref_mod, ret); |
1814 | return ret; |
1815 | } |
1816 | |
1817 | static inline struct btrfs_delayed_ref_node * |
1818 | select_delayed_ref(struct btrfs_delayed_ref_head *head) |
1819 | { |
1820 | struct btrfs_delayed_ref_node *ref; |
1821 | |
1822 | if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) |
1823 | return NULL; |
1824 | |
1825 | /* |
1826 | * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first. |
1827 | * This prevents a ref count from going down to zero, which would delete |
1828 | * the extent item from the extent tree while there are still references |
1829 | * to add; those adds would fail because they could not find the item. |
1830 | */ |
1831 | if (!list_empty(head: &head->ref_add_list)) |
1832 | return list_first_entry(&head->ref_add_list, |
1833 | struct btrfs_delayed_ref_node, add_list); |
1834 | |
1835 | ref = rb_entry(rb_first_cached(&head->ref_tree), |
1836 | struct btrfs_delayed_ref_node, ref_node); |
1837 | ASSERT(list_empty(&ref->add_list)); |
1838 | return ref; |
1839 | } |
1840 | |
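/*
 * Stop processing a locked ref head: mark it ready again so another thread
 * can pick it up, and release our lock on it.
 */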
1841 | static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, |
1842 | struct btrfs_delayed_ref_head *head) |
1843 | { |
1844 | spin_lock(lock: &delayed_refs->lock); |
1845 | head->processing = false; |
1846 | delayed_refs->num_heads_ready++; |
1847 | spin_unlock(lock: &delayed_refs->lock); |
1848 | btrfs_delayed_ref_unlock(head); |
1849 | } |
1850 | |
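/*
 * Return the head's pending extent op, or NULL if there is none. If the
 * head still has to insert its reserved extent, there is no extent item
 * to update (the range gets pinned instead), so cancel and free the op.
 */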
1851 | static struct btrfs_delayed_extent_op *cleanup_extent_op( |
1852 | struct btrfs_delayed_ref_head *head) |
1853 | { |
1854 | struct btrfs_delayed_extent_op *extent_op = head->extent_op; |
1855 | |
1856 | if (!extent_op) |
1857 | return NULL; |
1858 | |
1859 | if (head->must_insert_reserved) { |
1860 | head->extent_op = NULL; |
1861 | btrfs_free_delayed_extent_op(op: extent_op); |
1862 | return NULL; |
1863 | } |
1864 | return extent_op; |
1865 | } |
1866 | |
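/*
 * Run the head's pending extent op, if any. Returns 1 if an op was run
 * (the head's spinlock is dropped in that case), 0 if there was nothing
 * to do, and a negative errno on failure.
 */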
1867 | static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, |
1868 | struct btrfs_delayed_ref_head *head) |
1869 | { |
1870 | struct btrfs_delayed_extent_op *extent_op; |
1871 | int ret; |
1872 | |
1873 | extent_op = cleanup_extent_op(head); |
1874 | if (!extent_op) |
1875 | return 0; |
1876 | head->extent_op = NULL; |
1877 | spin_unlock(lock: &head->lock); |
1878 | ret = run_delayed_extent_op(trans, head, extent_op); |
1879 | btrfs_free_delayed_extent_op(op: extent_op); |
1880 | return ret ? ret : 1; |
1881 | } |
1882 | |
1883 | u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, |
1884 | struct btrfs_delayed_ref_root *delayed_refs, |
1885 | struct btrfs_delayed_ref_head *head) |
1886 | { |
1887 | u64 ret = 0; |
1888 | |
1889 | /* |
1890 | * We had csum deletions accounted for in our delayed refs rsv, we need |
1891 | * to drop the csum leaves for this update from our delayed_refs_rsv. |
1892 | */ |
1893 | if (head->total_ref_mod < 0 && head->is_data) { |
1894 | int nr_csums; |
1895 | |
1896 | spin_lock(lock: &delayed_refs->lock); |
1897 | delayed_refs->pending_csums -= head->num_bytes; |
1898 | spin_unlock(lock: &delayed_refs->lock); |
1899 | nr_csums = btrfs_csum_bytes_to_leaves(fs_info, csum_bytes: head->num_bytes); |
1900 | |
1901 | btrfs_delayed_refs_rsv_release(fs_info, nr_refs: 0, nr_csums); |
1902 | |
1903 | ret = btrfs_calc_delayed_ref_csum_bytes(fs_info, num_csum_items: nr_csums); |
1904 | } |
1905 | /* must_insert_reserved can be set only if we didn't run the head ref. */ |
1906 | if (head->must_insert_reserved) |
1907 | free_head_ref_squota_rsv(fs_info, href: head); |
1908 | |
1909 | return ret; |
1910 | } |
1911 | |
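/*
 * Final cleanup once all ref nodes of a head have been run: apply any
 * remaining extent op, re-check under the delayed refs lock that no new
 * refs were added, then unlink the head. If the head still had to insert
 * its reserved extent, pin the range and, for data, delete its csums.
 * Returns 1 if new refs showed up and the head must be processed again.
 */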
1912 | static int cleanup_ref_head(struct btrfs_trans_handle *trans, |
1913 | struct btrfs_delayed_ref_head *head, |
1914 | u64 *bytes_released) |
1915 | { |
1917 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1918 | struct btrfs_delayed_ref_root *delayed_refs; |
1919 | int ret; |
1920 | |
1921 | delayed_refs = &trans->transaction->delayed_refs; |
1922 | |
1923 | ret = run_and_cleanup_extent_op(trans, head); |
1924 | if (ret < 0) { |
1925 | unselect_delayed_ref_head(delayed_refs, head); |
1926 | btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); |
1927 | return ret; |
1928 | } else if (ret) { |
1929 | return ret; |
1930 | } |
1931 | |
1932 | /* |
1933 | * Need to drop our head ref lock and re-acquire the delayed ref lock |
1934 | * and then re-check to make sure nobody got added. |
1935 | */ |
1936 | spin_unlock(lock: &head->lock); |
1937 | spin_lock(lock: &delayed_refs->lock); |
1938 | spin_lock(lock: &head->lock); |
1939 | if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) { |
1940 | spin_unlock(lock: &head->lock); |
1941 | spin_unlock(lock: &delayed_refs->lock); |
1942 | return 1; |
1943 | } |
1944 | btrfs_delete_ref_head(delayed_refs, head); |
1945 | spin_unlock(lock: &head->lock); |
1946 | spin_unlock(lock: &delayed_refs->lock); |
1947 | |
1948 | if (head->must_insert_reserved) { |
1949 | btrfs_pin_extent(trans, bytenr: head->bytenr, num: head->num_bytes, reserved: 1); |
1950 | if (head->is_data) { |
1951 | struct btrfs_root *csum_root; |
1952 | |
1953 | csum_root = btrfs_csum_root(fs_info, bytenr: head->bytenr); |
1954 | ret = btrfs_del_csums(trans, root: csum_root, bytenr: head->bytenr, |
1955 | len: head->num_bytes); |
1956 | } |
1957 | } |
1958 | |
1959 | *bytes_released += btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
1960 | |
1961 | trace_run_delayed_ref_head(fs_info, head_ref: head, action: 0); |
1962 | btrfs_delayed_ref_unlock(head); |
1963 | btrfs_put_delayed_ref_head(head); |
1964 | return ret; |
1965 | } |
1966 | |
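/*
 * Select the next delayed ref head and lock it for processing. Returns
 * NULL when no head is ready, or ERR_PTR(-EAGAIN) when the head went away
 * while we waited for its mutex, in which case the caller should retry.
 */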
1967 | static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( |
1968 | struct btrfs_trans_handle *trans) |
1969 | { |
1970 | struct btrfs_delayed_ref_root *delayed_refs = |
1971 | &trans->transaction->delayed_refs; |
1972 | struct btrfs_delayed_ref_head *head = NULL; |
1973 | int ret; |
1974 | |
1975 | spin_lock(lock: &delayed_refs->lock); |
1976 | head = btrfs_select_ref_head(delayed_refs); |
1977 | if (!head) { |
1978 | spin_unlock(lock: &delayed_refs->lock); |
1979 | return head; |
1980 | } |
1981 | |
1982 | /* |
1983 | * Grab the lock that says we are going to process all the refs for |
1984 | * this head |
1985 | */ |
1986 | ret = btrfs_delayed_ref_lock(delayed_refs, head); |
1987 | spin_unlock(lock: &delayed_refs->lock); |
1988 | |
1989 | /* |
1990 | * We may have dropped the spin lock to get the head mutex lock, and |
1991 | * that might have given someone else time to free the head. If that's |
1992 | * true, it has been removed from our list and we can move on. |
1993 | */ |
1994 | if (ret == -EAGAIN) |
1995 | head = ERR_PTR(error: -EAGAIN); |
1996 | |
1997 | return head; |
1998 | } |
1999 | |
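/*
 * Run all currently runnable ref nodes of a locked head, merging matching
 * add/drop pairs first. Returns -EAGAIN when a ref is still blocked by a
 * pending tree mod log sequence number; the head is unlocked in that case.
 */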
2000 | static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, |
2001 | struct btrfs_delayed_ref_head *locked_ref, |
2002 | u64 *bytes_released) |
2003 | { |
2004 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2005 | struct btrfs_delayed_ref_root *delayed_refs; |
2006 | struct btrfs_delayed_extent_op *extent_op; |
2007 | struct btrfs_delayed_ref_node *ref; |
2008 | bool must_insert_reserved; |
2009 | int ret; |
2010 | |
2011 | delayed_refs = &trans->transaction->delayed_refs; |
2012 | |
2013 | lockdep_assert_held(&locked_ref->mutex); |
2014 | lockdep_assert_held(&locked_ref->lock); |
2015 | |
2016 | while ((ref = select_delayed_ref(head: locked_ref))) { |
2017 | if (ref->seq && |
2018 | btrfs_check_delayed_seq(fs_info, seq: ref->seq)) { |
2019 | spin_unlock(lock: &locked_ref->lock); |
2020 | unselect_delayed_ref_head(delayed_refs, head: locked_ref); |
2021 | return -EAGAIN; |
2022 | } |
2023 | |
2024 | rb_erase_cached(node: &ref->ref_node, root: &locked_ref->ref_tree); |
2025 | RB_CLEAR_NODE(&ref->ref_node); |
2026 | if (!list_empty(head: &ref->add_list)) |
2027 | list_del(entry: &ref->add_list); |
2028 | /* |
2029 | * When we play the delayed ref, also correct the ref_mod on |
2030 | * head |
2031 | */ |
2032 | switch (ref->action) { |
2033 | case BTRFS_ADD_DELAYED_REF: |
2034 | case BTRFS_ADD_DELAYED_EXTENT: |
2035 | locked_ref->ref_mod -= ref->ref_mod; |
2036 | break; |
2037 | case BTRFS_DROP_DELAYED_REF: |
2038 | locked_ref->ref_mod += ref->ref_mod; |
2039 | break; |
2040 | default: |
2041 | WARN_ON(1); |
2042 | } |
2043 | atomic_dec(v: &delayed_refs->num_entries); |
2044 | |
2045 | /* |
2046 | * Record the must_insert_reserved flag before we drop the |
2047 | * spin lock. |
2048 | */ |
2049 | must_insert_reserved = locked_ref->must_insert_reserved; |
2050 | /* |
2051 | * Unsetting this on the head ref relinquishes ownership of |
2052 | * the rsv_bytes, so it is critical that every possible code |
2053 | * path from here forward frees all reserves including qgroup |
2054 | * reserve. |
2055 | */ |
2056 | locked_ref->must_insert_reserved = false; |
2057 | |
2058 | extent_op = locked_ref->extent_op; |
2059 | locked_ref->extent_op = NULL; |
2060 | spin_unlock(lock: &locked_ref->lock); |
2061 | |
2062 | ret = run_one_delayed_ref(trans, href: locked_ref, node: ref, extent_op, |
2063 | insert_reserved: must_insert_reserved); |
2064 | btrfs_delayed_refs_rsv_release(fs_info, nr_refs: 1, nr_csums: 0); |
2065 | *bytes_released += btrfs_calc_delayed_ref_bytes(fs_info, num_delayed_refs: 1); |
2066 | |
2067 | btrfs_free_delayed_extent_op(op: extent_op); |
2068 | if (ret) { |
2069 | unselect_delayed_ref_head(delayed_refs, head: locked_ref); |
2070 | btrfs_put_delayed_ref(ref); |
2071 | return ret; |
2072 | } |
2073 | |
2074 | btrfs_put_delayed_ref(ref); |
2075 | cond_resched(); |
2076 | |
2077 | spin_lock(lock: &locked_ref->lock); |
2078 | btrfs_merge_delayed_refs(fs_info, delayed_refs, head: locked_ref); |
2079 | } |
2080 | |
2081 | return 0; |
2082 | } |
2083 | |
2084 | /* |
2085 | * Returns 0 on success or if called with an already aborted transaction. |
2086 | * Returns -ENOMEM or -EIO on failure and will abort the transaction. |
2087 | */ |
2088 | static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, |
2089 | u64 min_bytes) |
2090 | { |
2091 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2092 | struct btrfs_delayed_ref_root *delayed_refs; |
2093 | struct btrfs_delayed_ref_head *locked_ref = NULL; |
2094 | int ret; |
2095 | unsigned long count = 0; |
2096 | unsigned long max_count = 0; |
2097 | u64 bytes_processed = 0; |
2098 | |
2099 | delayed_refs = &trans->transaction->delayed_refs; |
2100 | if (min_bytes == 0) { |
2101 | max_count = delayed_refs->num_heads_ready; |
2102 | min_bytes = U64_MAX; |
2103 | } |
2104 | |
2105 | do { |
2106 | if (!locked_ref) { |
2107 | locked_ref = btrfs_obtain_ref_head(trans); |
2108 | if (IS_ERR_OR_NULL(ptr: locked_ref)) { |
2109 | if (PTR_ERR(ptr: locked_ref) == -EAGAIN) { |
2110 | continue; |
2111 | } else { |
2112 | break; |
2113 | } |
2114 | } |
2115 | count++; |
2116 | } |
2117 | /* |
2118 | * We need to try and merge add/drops of the same ref since we |
2119 | * can run into issues with relocate dropping the implicit ref |
2120 | * and then it being added back again before the drop can |
2121 | * finish. If we merged anything we need to re-loop so we can |
2122 | * get a good ref. |
2123 | * Or we can get node references of the same type that weren't |
2124 | * merged when created due to bumps in the tree mod seq, and |
2125 | * we need to merge them to prevent adding an inline extent |
2126 | * backref before dropping it (triggering a BUG_ON at |
2127 | * insert_inline_extent_backref()). |
2128 | */ |
2129 | spin_lock(lock: &locked_ref->lock); |
2130 | btrfs_merge_delayed_refs(fs_info, delayed_refs, head: locked_ref); |
2131 | |
2132 | ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, bytes_released: &bytes_processed); |
2133 | if (ret < 0 && ret != -EAGAIN) { |
2134 | /* |
2135 | * Error, btrfs_run_delayed_refs_for_head already |
2136 | * unlocked everything so just bail out |
2137 | */ |
2138 | return ret; |
2139 | } else if (!ret) { |
2140 | /* |
2141 | * Success, perform the usual cleanup of a processed |
2142 | * head |
2143 | */ |
2144 | ret = cleanup_ref_head(trans, head: locked_ref, bytes_released: &bytes_processed); |
2145 | if (ret > 0) { |
2146 | /* We dropped our lock, we need to loop. */ |
2147 | ret = 0; |
2148 | continue; |
2149 | } else if (ret) { |
2150 | return ret; |
2151 | } |
2152 | } |
2153 | |
2154 | /* |
2155 | * Either success case or btrfs_run_delayed_refs_for_head |
2156 | * returned -EAGAIN, meaning we need to select another head |
2157 | */ |
2158 | |
2159 | locked_ref = NULL; |
2160 | cond_resched(); |
2161 | } while ((min_bytes != U64_MAX && bytes_processed < min_bytes) || |
2162 | (max_count > 0 && count < max_count) || |
2163 | locked_ref); |
2164 | |
2165 | return 0; |
2166 | } |
2167 | |
2168 | #ifdef SCRAMBLE_DELAYED_REFS |
2169 | /* |
2170 | * Normally delayed refs get processed in ascending bytenr order. This |
2171 | * correlates in most cases to the order added. To expose dependencies on this |
2172 | * order, we start to process the tree in the middle instead of the beginning |
2173 | */ |
2174 | static u64 find_middle(struct rb_root *root) |
2175 | { |
2176 | struct rb_node *n = root->rb_node; |
2177 | struct btrfs_delayed_ref_node *entry; |
2178 | int alt = 1; |
2179 | u64 middle; |
2180 | u64 first = 0, last = 0; |
2181 | |
2182 | n = rb_first(root); |
2183 | if (n) { |
2184 | entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); |
2185 | first = entry->bytenr; |
2186 | } |
2187 | n = rb_last(root); |
2188 | if (n) { |
2189 | entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); |
2190 | last = entry->bytenr; |
2191 | } |
2192 | n = root->rb_node; |
2193 | |
2194 | while (n) { |
2195 | entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); |
2196 | WARN_ON(!entry->in_tree); |
2197 | |
2198 | middle = entry->bytenr; |
2199 | |
2200 | if (alt) |
2201 | n = n->rb_left; |
2202 | else |
2203 | n = n->rb_right; |
2204 | |
2205 | alt = 1 - alt; |
2206 | } |
2207 | return middle; |
2208 | } |
2209 | #endif |
2210 | |
2211 | /* |
2212 | * Start processing the delayed reference count updates and extent insertions |
2213 | * we have queued up so far. |
2214 | * |
2215 | * @trans: Transaction handle. |
2216 | * @min_bytes: How many bytes of delayed references to process. After this |
2217 | * many bytes we stop processing delayed references if there are |
2218 | * any more. If 0 it means to run all existing delayed references, |
2219 | * but not new ones added after running all existing ones. |
2220 | * Use (u64)-1 (U64_MAX) to run all existing delayed references |
2221 | * plus any new ones that are added. |
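* For example, btrfs_run_delayed_refs(trans, 0) runs only the
* references queued so far, while btrfs_run_delayed_refs(trans,
* U64_MAX) keeps going until the delayed ref tree is empty.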
2222 | * |
2223 | * Returns 0 on success or if called with an aborted transaction |
2224 | * Returns <0 on error and aborts the transaction |
2225 | */ |
2226 | int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes) |
2227 | { |
2228 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2229 | struct btrfs_delayed_ref_root *delayed_refs; |
2230 | int ret; |
2231 | |
2232 | /* We'll clean this up in btrfs_cleanup_transaction */ |
2233 | if (TRANS_ABORTED(trans)) |
2234 | return 0; |
2235 | |
2236 | if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) |
2237 | return 0; |
2238 | |
2239 | delayed_refs = &trans->transaction->delayed_refs; |
2240 | again: |
2241 | #ifdef SCRAMBLE_DELAYED_REFS |
2242 | delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); |
2243 | #endif |
2244 | ret = __btrfs_run_delayed_refs(trans, min_bytes); |
2245 | if (ret < 0) { |
2246 | btrfs_abort_transaction(trans, ret); |
2247 | return ret; |
2248 | } |
2249 | |
2250 | if (min_bytes == U64_MAX) { |
2251 | btrfs_create_pending_block_groups(trans); |
2252 | |
2253 | spin_lock(lock: &delayed_refs->lock); |
2254 | if (RB_EMPTY_ROOT(&delayed_refs->href_root.rb_root)) { |
2255 | spin_unlock(lock: &delayed_refs->lock); |
2256 | return 0; |
2257 | } |
2258 | spin_unlock(lock: &delayed_refs->lock); |
2259 | |
2260 | cond_resched(); |
2261 | goto again; |
2262 | } |
2263 | |
2264 | return 0; |
2265 | } |
2266 | |
2267 | int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, |
2268 | struct extent_buffer *eb, u64 flags) |
2269 | { |
2270 | struct btrfs_delayed_extent_op *extent_op; |
2271 | int level = btrfs_header_level(eb); |
2272 | int ret; |
2273 | |
2274 | extent_op = btrfs_alloc_delayed_extent_op(); |
2275 | if (!extent_op) |
2276 | return -ENOMEM; |
2277 | |
2278 | extent_op->flags_to_set = flags; |
2279 | extent_op->update_flags = true; |
2280 | extent_op->update_key = false; |
2281 | extent_op->level = level; |
2282 | |
2283 | ret = btrfs_add_delayed_extent_op(trans, bytenr: eb->start, num_bytes: eb->len, extent_op); |
2284 | if (ret) |
2285 | btrfs_free_delayed_extent_op(op: extent_op); |
2286 | return ret; |
2287 | } |
2288 | |
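/*
 * Check the running transaction's delayed refs for a reference to the data
 * extent at @bytenr from anything other than (@root, @objectid, @offset).
 * Returns 1 if such a cross reference exists, 0 if not, and -EAGAIN if the
 * head's mutex was contended and the caller has to retry.
 */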
2289 | static noinline int check_delayed_ref(struct btrfs_root *root, |
2290 | struct btrfs_path *path, |
2291 | u64 objectid, u64 offset, u64 bytenr) |
2292 | { |
2293 | struct btrfs_delayed_ref_head *head; |
2294 | struct btrfs_delayed_ref_node *ref; |
2295 | struct btrfs_delayed_data_ref *data_ref; |
2296 | struct btrfs_delayed_ref_root *delayed_refs; |
2297 | struct btrfs_transaction *cur_trans; |
2298 | struct rb_node *node; |
2299 | int ret = 0; |
2300 | |
2301 | spin_lock(lock: &root->fs_info->trans_lock); |
2302 | cur_trans = root->fs_info->running_transaction; |
2303 | if (cur_trans) |
2304 | refcount_inc(r: &cur_trans->use_count); |
2305 | spin_unlock(lock: &root->fs_info->trans_lock); |
2306 | if (!cur_trans) |
2307 | return 0; |
2308 | |
2309 | delayed_refs = &cur_trans->delayed_refs; |
2310 | spin_lock(lock: &delayed_refs->lock); |
2311 | head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); |
2312 | if (!head) { |
2313 | spin_unlock(lock: &delayed_refs->lock); |
2314 | btrfs_put_transaction(transaction: cur_trans); |
2315 | return 0; |
2316 | } |
2317 | |
2318 | if (!mutex_trylock(lock: &head->mutex)) { |
2319 | if (path->nowait) { |
2320 | spin_unlock(lock: &delayed_refs->lock); |
2321 | btrfs_put_transaction(transaction: cur_trans); |
2322 | return -EAGAIN; |
2323 | } |
2324 | |
2325 | refcount_inc(r: &head->refs); |
2326 | spin_unlock(lock: &delayed_refs->lock); |
2327 | |
2328 | btrfs_release_path(p: path); |
2329 | |
2330 | /* |
2331 | * Mutex was contended, block until it's released and let |
2332 | * caller try again |
2333 | */ |
2334 | mutex_lock(&head->mutex); |
2335 | mutex_unlock(lock: &head->mutex); |
2336 | btrfs_put_delayed_ref_head(head); |
2337 | btrfs_put_transaction(transaction: cur_trans); |
2338 | return -EAGAIN; |
2339 | } |
2340 | spin_unlock(lock: &delayed_refs->lock); |
2341 | |
2342 | spin_lock(lock: &head->lock); |
2343 | /* |
2344 | * XXX: We should replace this with a proper search function in the |
2345 | * future. |
2346 | */ |
2347 | for (node = rb_first_cached(&head->ref_tree); node; |
2348 | node = rb_next(node)) { |
2349 | ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); |
2350 | /* If it's a shared ref we know a cross reference exists */ |
2351 | if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { |
2352 | ret = 1; |
2353 | break; |
2354 | } |
2355 | |
2356 | data_ref = btrfs_delayed_node_to_data_ref(node: ref); |
2357 | |
2358 | /* |
2359 | * If our ref doesn't match the one we're currently looking at |
2360 | * then we have a cross reference. |
2361 | */ |
2362 | if (data_ref->root != root->root_key.objectid || |
2363 | data_ref->objectid != objectid || |
2364 | data_ref->offset != offset) { |
2365 | ret = 1; |
2366 | break; |
2367 | } |
2368 | } |
2369 | spin_unlock(lock: &head->lock); |
2370 | mutex_unlock(lock: &head->mutex); |
2371 | btrfs_put_transaction(transaction: cur_trans); |
2372 | return ret; |
2373 | } |
2374 | |
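/*
 * The committed counterpart of check_delayed_ref(): returns 0 only when
 * the extent item holds a single inline EXTENT_DATA_REF matching
 * (@root, @objectid, @offset) exactly, 1 when the extent may be shared,
 * and -ENOENT when no extent item exists for @bytenr.
 */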
2375 | static noinline int check_committed_ref(struct btrfs_root *root, |
2376 | struct btrfs_path *path, |
2377 | u64 objectid, u64 offset, u64 bytenr, |
2378 | bool strict) |
2379 | { |
2380 | struct btrfs_fs_info *fs_info = root->fs_info; |
2381 | struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); |
2382 | struct extent_buffer *leaf; |
2383 | struct btrfs_extent_data_ref *ref; |
2384 | struct btrfs_extent_inline_ref *iref; |
2385 | struct btrfs_extent_item *ei; |
2386 | struct btrfs_key key; |
2387 | u32 item_size; |
2388 | u32 expected_size; |
2389 | int type; |
2390 | int ret; |
2391 | |
2392 | key.objectid = bytenr; |
2393 | key.offset = (u64)-1; |
2394 | key.type = BTRFS_EXTENT_ITEM_KEY; |
2395 | |
2396 | ret = btrfs_search_slot(NULL, root: extent_root, key: &key, p: path, ins_len: 0, cow: 0); |
2397 | if (ret < 0) |
2398 | goto out; |
2399 | if (ret == 0) { |
2400 | /* |
2401 | * Key with offset -1 found, there would have to exist an extent |
2402 | * item with such offset, but this is out of the valid range. |
2403 | */ |
2404 | ret = -EUCLEAN; |
2405 | goto out; |
2406 | } |
2407 | |
2408 | ret = -ENOENT; |
2409 | if (path->slots[0] == 0) |
2410 | goto out; |
2411 | |
2412 | path->slots[0]--; |
2413 | leaf = path->nodes[0]; |
2414 | btrfs_item_key_to_cpu(eb: leaf, cpu_key: &key, nr: path->slots[0]); |
2415 | |
2416 | if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) |
2417 | goto out; |
2418 | |
2419 | ret = 1; |
2420 | item_size = btrfs_item_size(eb: leaf, slot: path->slots[0]); |
2421 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
2422 | expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY); |
2423 | |
2424 | /* No inline refs; we need to bail before checking for owner ref. */ |
2425 | if (item_size == sizeof(*ei)) |
2426 | goto out; |
2427 | |
2428 | /* Check for an owner ref; skip over it to the real inline refs. */ |
2429 | iref = (struct btrfs_extent_inline_ref *)(ei + 1); |
2430 | type = btrfs_get_extent_inline_ref_type(eb: leaf, iref, is_data: BTRFS_REF_TYPE_DATA); |
2431 | if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) { |
2432 | expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY); |
2433 | iref = (struct btrfs_extent_inline_ref *)(iref + 1); |
2434 | } |
2435 | |
2436 | /* If extent item has more than 1 inline ref then it's shared */ |
2437 | if (item_size != expected_size) |
2438 | goto out; |
2439 | |
2440 | /* |
2441 | * If extent created before last snapshot => it's shared unless the |
2442 | * snapshot has been deleted. Use the heuristic if strict is false. |
2443 | */ |
2444 | if (!strict && |
2445 | (btrfs_extent_generation(eb: leaf, s: ei) <= |
2446 | btrfs_root_last_snapshot(s: &root->root_item))) |
2447 | goto out; |
2448 | |
2449 | /* If this extent has SHARED_DATA_REF then it's shared */ |
2450 | type = btrfs_get_extent_inline_ref_type(eb: leaf, iref, is_data: BTRFS_REF_TYPE_DATA); |
2451 | if (type != BTRFS_EXTENT_DATA_REF_KEY) |
2452 | goto out; |
2453 | |
2454 | ref = (struct btrfs_extent_data_ref *)(&iref->offset); |
2455 | if (btrfs_extent_refs(eb: leaf, s: ei) != |
2456 | btrfs_extent_data_ref_count(eb: leaf, s: ref) || |
2457 | btrfs_extent_data_ref_root(eb: leaf, s: ref) != |
2458 | root->root_key.objectid || |
2459 | btrfs_extent_data_ref_objectid(eb: leaf, s: ref) != objectid || |
2460 | btrfs_extent_data_ref_offset(eb: leaf, s: ref) != offset) |
2461 | goto out; |
2462 | |
2463 | ret = 0; |
2464 | out: |
2465 | return ret; |
2466 | } |
2467 | |
2468 | int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, |
2469 | u64 bytenr, bool strict, struct btrfs_path *path) |
2470 | { |
2471 | int ret; |
2472 | |
2473 | do { |
2474 | ret = check_committed_ref(root, path, objectid, |
2475 | offset, bytenr, strict); |
2476 | if (ret && ret != -ENOENT) |
2477 | goto out; |
2478 | |
2479 | ret = check_delayed_ref(root, path, objectid, offset, bytenr); |
2480 | } while (ret == -EAGAIN); |
2481 | |
2482 | out: |
2483 | btrfs_release_path(p: path); |
2484 | if (btrfs_is_data_reloc_root(root)) |
2485 | WARN_ON(ret > 0); |
2486 | return ret; |
2487 | } |
2488 | |
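/*
 * Add or drop one reference for every extent a tree block points to: the
 * disk extents referenced by the file extent items of a leaf, or the
 * child blocks of a node.
 */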
2489 | static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, |
2490 | struct btrfs_root *root, |
2491 | struct extent_buffer *buf, |
2492 | int full_backref, int inc) |
2493 | { |
2494 | struct btrfs_fs_info *fs_info = root->fs_info; |
2495 | u64 bytenr; |
2496 | u64 num_bytes; |
2497 | u64 parent; |
2498 | u64 ref_root; |
2499 | u32 nritems; |
2500 | struct btrfs_key key; |
2501 | struct btrfs_file_extent_item *fi; |
2502 | struct btrfs_ref generic_ref = { 0 }; |
2503 | bool for_reloc = btrfs_header_flag(eb: buf, BTRFS_HEADER_FLAG_RELOC); |
2504 | int i; |
2505 | int action; |
2506 | int level; |
2507 | int ret = 0; |
2508 | |
2509 | if (btrfs_is_testing(fs_info)) |
2510 | return 0; |
2511 | |
2512 | ref_root = btrfs_header_owner(eb: buf); |
2513 | nritems = btrfs_header_nritems(eb: buf); |
2514 | level = btrfs_header_level(eb: buf); |
2515 | |
2516 | if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) |
2517 | return 0; |
2518 | |
2519 | if (full_backref) |
2520 | parent = buf->start; |
2521 | else |
2522 | parent = 0; |
2523 | if (inc) |
2524 | action = BTRFS_ADD_DELAYED_REF; |
2525 | else |
2526 | action = BTRFS_DROP_DELAYED_REF; |
2527 | |
2528 | for (i = 0; i < nritems; i++) { |
2529 | if (level == 0) { |
2530 | btrfs_item_key_to_cpu(eb: buf, cpu_key: &key, nr: i); |
2531 | if (key.type != BTRFS_EXTENT_DATA_KEY) |
2532 | continue; |
2533 | fi = btrfs_item_ptr(buf, i, |
2534 | struct btrfs_file_extent_item); |
2535 | if (btrfs_file_extent_type(eb: buf, s: fi) == |
2536 | BTRFS_FILE_EXTENT_INLINE) |
2537 | continue; |
2538 | bytenr = btrfs_file_extent_disk_bytenr(eb: buf, s: fi); |
2539 | if (bytenr == 0) |
2540 | continue; |
2541 | |
2542 | num_bytes = btrfs_file_extent_disk_num_bytes(eb: buf, s: fi); |
2543 | key.offset -= btrfs_file_extent_offset(eb: buf, s: fi); |
2544 | btrfs_init_generic_ref(generic_ref: &generic_ref, action, bytenr, |
2545 | len: num_bytes, parent, owning_root: ref_root); |
2546 | btrfs_init_data_ref(generic_ref: &generic_ref, ref_root, ino: key.objectid, |
2547 | offset: key.offset, mod_root: root->root_key.objectid, |
2548 | skip_qgroup: for_reloc); |
2549 | if (inc) |
2550 | ret = btrfs_inc_extent_ref(trans, generic_ref: &generic_ref); |
2551 | else |
2552 | ret = btrfs_free_extent(trans, ref: &generic_ref); |
2553 | if (ret) |
2554 | goto fail; |
2555 | } else { |
2556 | bytenr = btrfs_node_blockptr(eb: buf, nr: i); |
2557 | num_bytes = fs_info->nodesize; |
2558 | /* We don't know the owning_root, use 0. */ |
2559 | btrfs_init_generic_ref(generic_ref: &generic_ref, action, bytenr, |
2560 | len: num_bytes, parent, owning_root: 0); |
2561 | btrfs_init_tree_ref(generic_ref: &generic_ref, level: level - 1, root: ref_root, |
2562 | mod_root: root->root_key.objectid, skip_qgroup: for_reloc); |
2563 | if (inc) |
2564 | ret = btrfs_inc_extent_ref(trans, generic_ref: &generic_ref); |
2565 | else |
2566 | ret = btrfs_free_extent(trans, ref: &generic_ref); |
2567 | if (ret) |
2568 | goto fail; |
2569 | } |
2570 | } |
2571 | return 0; |
2572 | fail: |
2573 | return ret; |
2574 | } |
2575 | |
2576 | int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
2577 | struct extent_buffer *buf, int full_backref) |
2578 | { |
2579 | return __btrfs_mod_ref(trans, root, buf, full_backref, inc: 1); |
2580 | } |
2581 | |
2582 | int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
2583 | struct extent_buffer *buf, int full_backref) |
2584 | { |
2585 | return __btrfs_mod_ref(trans, root, buf, full_backref, inc: 0); |
2586 | } |
2587 | |
2588 | static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) |
2589 | { |
2590 | struct btrfs_fs_info *fs_info = root->fs_info; |
2591 | u64 flags; |
2592 | u64 ret; |
2593 | |
2594 | if (data) |
2595 | flags = BTRFS_BLOCK_GROUP_DATA; |
2596 | else if (root == fs_info->chunk_root) |
2597 | flags = BTRFS_BLOCK_GROUP_SYSTEM; |
2598 | else |
2599 | flags = BTRFS_BLOCK_GROUP_METADATA; |
2600 | |
2601 | ret = btrfs_get_alloc_profile(fs_info, orig_flags: flags); |
2602 | return ret; |
2603 | } |
2604 | |
2605 | static u64 first_logical_byte(struct btrfs_fs_info *fs_info) |
2606 | { |
2607 | struct rb_node *leftmost; |
2608 | u64 bytenr = 0; |
2609 | |
2610 | read_lock(&fs_info->block_group_cache_lock); |
2611 | /* Get the block group with the lowest logical start address. */ |
2612 | leftmost = rb_first_cached(&fs_info->block_group_cache_tree); |
2613 | if (leftmost) { |
2614 | struct btrfs_block_group *bg; |
2615 | |
2616 | bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); |
2617 | bytenr = bg->start; |
2618 | } |
2619 | read_unlock(&fs_info->block_group_cache_lock); |
2620 | |
2621 | return bytenr; |
2622 | } |
2623 | |
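/*
 * Account a range as pinned in the block group and space info counters,
 * and mark it dirty in the transaction's pinned extents tree so that it
 * is returned to the free space cache when the transaction commits.
 */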
2624 | static int pin_down_extent(struct btrfs_trans_handle *trans, |
2625 | struct btrfs_block_group *cache, |
2626 | u64 bytenr, u64 num_bytes, int reserved) |
2627 | { |
2628 | struct btrfs_fs_info *fs_info = cache->fs_info; |
2629 | |
2630 | spin_lock(lock: &cache->space_info->lock); |
2631 | spin_lock(lock: &cache->lock); |
2632 | cache->pinned += num_bytes; |
2633 | btrfs_space_info_update_bytes_pinned(fs_info, sinfo: cache->space_info, |
2634 | bytes: num_bytes); |
2635 | if (reserved) { |
2636 | cache->reserved -= num_bytes; |
2637 | cache->space_info->bytes_reserved -= num_bytes; |
2638 | } |
2639 | spin_unlock(lock: &cache->lock); |
2640 | spin_unlock(lock: &cache->space_info->lock); |
2641 | |
2642 | set_extent_bit(tree: &trans->transaction->pinned_extents, start: bytenr, |
2643 | end: bytenr + num_bytes - 1, bits: EXTENT_DIRTY, NULL); |
2644 | return 0; |
2645 | } |
2646 | |
2647 | int btrfs_pin_extent(struct btrfs_trans_handle *trans, |
2648 | u64 bytenr, u64 num_bytes, int reserved) |
2649 | { |
2650 | struct btrfs_block_group *cache; |
2651 | |
2652 | cache = btrfs_lookup_block_group(info: trans->fs_info, bytenr); |
2653 | BUG_ON(!cache); /* Logic error */ |
2654 | |
2655 | pin_down_extent(trans, cache, bytenr, num_bytes, reserved); |
2656 | |
2657 | btrfs_put_block_group(cache); |
2658 | return 0; |
2659 | } |
2660 | |
2661 | int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, |
2662 | const struct extent_buffer *eb) |
2663 | { |
2664 | struct btrfs_block_group *cache; |
2665 | int ret; |
2666 | |
2667 | cache = btrfs_lookup_block_group(info: trans->fs_info, bytenr: eb->start); |
2668 | if (!cache) |
2669 | return -EINVAL; |
2670 | |
2671 | /* |
2672 | * Fully cache the free space first so that our pin removes the free space |
2673 | * from the cache. |
2674 | */ |
2675 | ret = btrfs_cache_block_group(cache, wait: true); |
2676 | if (ret) |
2677 | goto out; |
2678 | |
2679 | pin_down_extent(trans, cache, bytenr: eb->start, num_bytes: eb->len, reserved: 0); |
2680 | |
2681 | /* remove us from the free space cache (if we're there at all) */ |
2682 | ret = btrfs_remove_free_space(block_group: cache, bytenr: eb->start, size: eb->len); |
2683 | out: |
2684 | btrfs_put_block_group(cache); |
2685 | return ret; |
2686 | } |
2687 | |
2688 | static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, |
2689 | u64 start, u64 num_bytes) |
2690 | { |
2691 | int ret; |
2692 | struct btrfs_block_group *block_group; |
2693 | |
2694 | block_group = btrfs_lookup_block_group(info: fs_info, bytenr: start); |
2695 | if (!block_group) |
2696 | return -EINVAL; |
2697 | |
2698 | ret = btrfs_cache_block_group(cache: block_group, wait: true); |
2699 | if (ret) |
2700 | goto out; |
2701 | |
2702 | ret = btrfs_remove_free_space(block_group, bytenr: start, size: num_bytes); |
2703 | out: |
2704 | btrfs_put_block_group(cache: block_group); |
2705 | return ret; |
2706 | } |
2707 | |
2708 | int btrfs_exclude_logged_extents(struct extent_buffer *eb) |
2709 | { |
2710 | struct btrfs_fs_info *fs_info = eb->fs_info; |
2711 | struct btrfs_file_extent_item *item; |
2712 | struct btrfs_key key; |
2713 | int found_type; |
2714 | int i; |
2715 | int ret = 0; |
2716 | |
2717 | if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) |
2718 | return 0; |
2719 | |
2720 | for (i = 0; i < btrfs_header_nritems(eb); i++) { |
2721 | btrfs_item_key_to_cpu(eb, cpu_key: &key, nr: i); |
2722 | if (key.type != BTRFS_EXTENT_DATA_KEY) |
2723 | continue; |
2724 | item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); |
2725 | found_type = btrfs_file_extent_type(eb, s: item); |
2726 | if (found_type == BTRFS_FILE_EXTENT_INLINE) |
2727 | continue; |
2728 | if (btrfs_file_extent_disk_bytenr(eb, s: item) == 0) |
2729 | continue; |
2730 | key.objectid = btrfs_file_extent_disk_bytenr(eb, s: item); |
2731 | key.offset = btrfs_file_extent_disk_num_bytes(eb, s: item); |
2732 | ret = __exclude_logged_extent(fs_info, start: key.objectid, num_bytes: key.offset); |
2733 | if (ret) |
2734 | break; |
2735 | } |
2736 | |
2737 | return ret; |
2738 | } |
2739 | |
2740 | static void |
2741 | btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) |
2742 | { |
2743 | atomic_inc(v: &bg->reservations); |
2744 | } |
2745 | |
2746 | /* |
2747 | * Returns the free cluster for the given space info and sets empty_cluster to |
2748 | * what it should be based on the mount options. |
2749 | */ |
2750 | static struct btrfs_free_cluster * |
2751 | fetch_cluster_info(struct btrfs_fs_info *fs_info, |
2752 | struct btrfs_space_info *space_info, u64 *empty_cluster) |
2753 | { |
2754 | struct btrfs_free_cluster *ret = NULL; |
2755 | |
2756 | *empty_cluster = 0; |
2757 | if (btrfs_mixed_space_info(space_info)) |
2758 | return ret; |
2759 | |
2760 | if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { |
2761 | ret = &fs_info->meta_alloc_cluster; |
2762 | if (btrfs_test_opt(fs_info, SSD)) |
2763 | *empty_cluster = SZ_2M; |
2764 | else |
2765 | *empty_cluster = SZ_64K; |
2766 | } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && |
2767 | btrfs_test_opt(fs_info, SSD_SPREAD)) { |
2768 | *empty_cluster = SZ_2M; |
2769 | ret = &fs_info->data_alloc_cluster; |
2770 | } |
2771 | |
2772 | return ret; |
2773 | } |
2774 | |
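/*
 * Walk [start, end] block group by block group, dropping the pinned byte
 * counters and, if requested, returning the space to the free space cache,
 * topping up the global block reserve and waking waiting space tickets.
 */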
2775 | static int unpin_extent_range(struct btrfs_fs_info *fs_info, |
2776 | u64 start, u64 end, |
2777 | const bool return_free_space) |
2778 | { |
2779 | struct btrfs_block_group *cache = NULL; |
2780 | struct btrfs_space_info *space_info; |
2781 | struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; |
2782 | struct btrfs_free_cluster *cluster = NULL; |
2783 | u64 len; |
2784 | u64 total_unpinned = 0; |
2785 | u64 empty_cluster = 0; |
2786 | bool readonly; |
2787 | int ret = 0; |
2788 | |
2789 | while (start <= end) { |
2790 | readonly = false; |
2791 | if (!cache || |
2792 | start >= cache->start + cache->length) { |
2793 | if (cache) |
2794 | btrfs_put_block_group(cache); |
2795 | total_unpinned = 0; |
2796 | cache = btrfs_lookup_block_group(info: fs_info, bytenr: start); |
2797 | if (cache == NULL) { |
2798 | /* Logic error, something removed the block group. */ |
2799 | ret = -EUCLEAN; |
2800 | goto out; |
2801 | } |
2802 | |
2803 | cluster = fetch_cluster_info(fs_info, |
2804 | space_info: cache->space_info, |
2805 | empty_cluster: &empty_cluster); |
2806 | empty_cluster <<= 1; |
2807 | } |
2808 | |
2809 | len = cache->start + cache->length - start; |
2810 | len = min(len, end + 1 - start); |
2811 | |
2812 | if (return_free_space) |
2813 | btrfs_add_free_space(block_group: cache, bytenr: start, size: len); |
2814 | |
2815 | start += len; |
2816 | total_unpinned += len; |
2817 | space_info = cache->space_info; |
2818 | |
2819 | /* |
2820 | * If this space cluster has been marked as fragmented and we've |
2821 | * unpinned enough in this block group to potentially allow a |
2822 | * cluster to be created inside of it, go ahead and clear the |
2823 | * fragmented check. |
2824 | */ |
2825 | if (cluster && cluster->fragmented && |
2826 | total_unpinned > empty_cluster) { |
2827 | spin_lock(lock: &cluster->lock); |
2828 | cluster->fragmented = 0; |
2829 | spin_unlock(lock: &cluster->lock); |
2830 | } |
2831 | |
2832 | spin_lock(lock: &space_info->lock); |
2833 | spin_lock(lock: &cache->lock); |
2834 | cache->pinned -= len; |
2835 | btrfs_space_info_update_bytes_pinned(fs_info, sinfo: space_info, bytes: -len); |
2836 | space_info->max_extent_size = 0; |
2837 | if (cache->ro) { |
2838 | space_info->bytes_readonly += len; |
2839 | readonly = true; |
2840 | } else if (btrfs_is_zoned(fs_info)) { |
2841 | /* Need reset before reusing in a zoned block group */ |
2842 | space_info->bytes_zone_unusable += len; |
2843 | readonly = true; |
2844 | } |
2845 | spin_unlock(lock: &cache->lock); |
2846 | if (!readonly && return_free_space && |
2847 | global_rsv->space_info == space_info) { |
2848 | spin_lock(lock: &global_rsv->lock); |
2849 | if (!global_rsv->full) { |
2850 | u64 to_add = min(len, global_rsv->size - |
2851 | global_rsv->reserved); |
2852 | |
2853 | global_rsv->reserved += to_add; |
2854 | btrfs_space_info_update_bytes_may_use(fs_info, |
2855 | sinfo: space_info, bytes: to_add); |
2856 | if (global_rsv->reserved >= global_rsv->size) |
2857 | global_rsv->full = 1; |
2858 | len -= to_add; |
2859 | } |
2860 | spin_unlock(lock: &global_rsv->lock); |
2861 | } |
2862 | /* Add to any tickets we may have */ |
2863 | if (!readonly && return_free_space && len) |
2864 | btrfs_try_granting_tickets(fs_info, space_info); |
2865 | spin_unlock(lock: &space_info->lock); |
2866 | } |
2867 | |
2868 | if (cache) |
2869 | btrfs_put_block_group(cache); |
2870 | out: |
2871 | return ret; |
2872 | } |
2873 | |
2874 | int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) |
2875 | { |
2876 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2877 | struct btrfs_block_group *block_group, *tmp; |
2878 | struct list_head *deleted_bgs; |
2879 | struct extent_io_tree *unpin; |
2880 | u64 start; |
2881 | u64 end; |
2882 | int ret; |
2883 | |
2884 | unpin = &trans->transaction->pinned_extents; |
2885 | |
2886 | while (!TRANS_ABORTED(trans)) { |
2887 | struct extent_state *cached_state = NULL; |
2888 | |
2889 | mutex_lock(&fs_info->unused_bg_unpin_mutex); |
2890 | if (!find_first_extent_bit(tree: unpin, start: 0, start_ret: &start, end_ret: &end, |
2891 | bits: EXTENT_DIRTY, cached_state: &cached_state)) { |
2892 | mutex_unlock(lock: &fs_info->unused_bg_unpin_mutex); |
2893 | break; |
2894 | } |
2895 | |
2896 | if (btrfs_test_opt(fs_info, DISCARD_SYNC)) |
2897 | ret = btrfs_discard_extent(fs_info, bytenr: start, |
2898 | num_bytes: end + 1 - start, NULL); |
2899 | |
2900 | clear_extent_dirty(tree: unpin, start, end, cached: &cached_state); |
2901 | ret = unpin_extent_range(fs_info, start, end, return_free_space: true); |
2902 | BUG_ON(ret); |
2903 | mutex_unlock(lock: &fs_info->unused_bg_unpin_mutex); |
2904 | free_extent_state(state: cached_state); |
2905 | cond_resched(); |
2906 | } |
2907 | |
2908 | if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { |
2909 | btrfs_discard_calc_delay(discard_ctl: &fs_info->discard_ctl); |
2910 | btrfs_discard_schedule_work(discard_ctl: &fs_info->discard_ctl, override: true); |
2911 | } |
2912 | |
2913 | /* |
2914 | * Transaction is finished. We don't need the lock anymore. We |
2915 | * do need to clean up the block groups in case of a transaction |
2916 | * abort. |
2917 | */ |
2918 | deleted_bgs = &trans->transaction->deleted_bgs; |
2919 | list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) { |
2920 | u64 trimmed = 0; |
2921 | |
2922 | ret = -EROFS; |
2923 | if (!TRANS_ABORTED(trans)) |
2924 | ret = btrfs_discard_extent(fs_info, |
2925 | bytenr: block_group->start, |
2926 | num_bytes: block_group->length, |
2927 | actual_bytes: &trimmed); |
2928 | |
2929 | list_del_init(entry: &block_group->bg_list); |
2930 | btrfs_unfreeze_block_group(cache: block_group); |
2931 | btrfs_put_block_group(cache: block_group); |
2932 | |
2933 | if (ret) { |
2934 | const char *errstr = btrfs_decode_error(error: ret); |
2935 | btrfs_warn(fs_info, |
2936 | "discard failed while removing blockgroup: errno=%d %s", |
2937 | ret, errstr); |
2938 | } |
2939 | } |
2940 | |
2941 | return 0; |
2942 | } |
2943 | |
2944 | /* |
2945 | * Parse an extent item's inline extents looking for a simple quotas owner ref. |
2946 | * |
2947 | * @fs_info: the btrfs_fs_info for this mount |
2948 | * @leaf: a leaf in the extent tree containing the extent item |
2949 | * @slot: the slot in the leaf where the extent item is found |
2950 | * |
2951 | * Returns the objectid of the root that originally allocated the extent item |
2952 | * if the inline owner ref is expected and present, otherwise 0. |
2953 | * |
2954 | * If an extent item has an owner ref item, it will be the first inline ref |
2955 | * item. Therefore the logic is to check whether there are any inline ref |
2956 | * items, then check the type of the first one. |
2957 | */ |
2958 | u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info, |
2959 | struct extent_buffer *leaf, int slot) |
2960 | { |
2961 | struct btrfs_extent_item *ei; |
2962 | struct btrfs_extent_inline_ref *iref; |
2963 | struct btrfs_extent_owner_ref *oref; |
2964 | unsigned long ptr; |
2965 | unsigned long end; |
2966 | int type; |
2967 | |
2968 | if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)) |
2969 | return 0; |
2970 | |
2971 | ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); |
2972 | ptr = (unsigned long)(ei + 1); |
2973 | end = (unsigned long)ei + btrfs_item_size(eb: leaf, slot); |
2974 | |
2975 | /* No inline ref items of any kind, can't check type. */ |
2976 | if (ptr == end) |
2977 | return 0; |
2978 | |
2979 | iref = (struct btrfs_extent_inline_ref *)ptr; |
2980 | type = btrfs_get_extent_inline_ref_type(eb: leaf, iref, is_data: BTRFS_REF_TYPE_ANY); |
2981 | |
2982 | /* We found an owner ref, get the root out of it. */ |
2983 | if (type == BTRFS_EXTENT_OWNER_REF_KEY) { |
2984 | oref = (struct btrfs_extent_owner_ref *)(&iref->offset); |
2985 | return btrfs_extent_owner_ref_root_id(eb: leaf, s: oref); |
2986 | } |
2987 | |
2988 | /* We have inline refs, but not an owner ref. */ |
2989 | return 0; |
2990 | } |
2991 | |
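/*
 * Common accounting when the last reference to an extent is dropped: for
 * data, delete its csum and RAID stripe tree items, then record the simple
 * quota delta, return the range to the free space tree and update the
 * block group counters.
 */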
2992 | static int do_free_extent_accounting(struct btrfs_trans_handle *trans, |
2993 | u64 bytenr, struct btrfs_squota_delta *delta) |
2994 | { |
2995 | int ret; |
2996 | u64 num_bytes = delta->num_bytes; |
2997 | |
2998 | if (delta->is_data) { |
2999 | struct btrfs_root *csum_root; |
3000 | |
3001 | csum_root = btrfs_csum_root(fs_info: trans->fs_info, bytenr); |
3002 | ret = btrfs_del_csums(trans, root: csum_root, bytenr, len: num_bytes); |
3003 | if (ret) { |
3004 | btrfs_abort_transaction(trans, ret); |
3005 | return ret; |
3006 | } |
3007 | |
3008 | ret = btrfs_delete_raid_extent(trans, start: bytenr, length: num_bytes); |
3009 | if (ret) { |
3010 | btrfs_abort_transaction(trans, ret); |
3011 | return ret; |
3012 | } |
3013 | } |
3014 | |
3015 | ret = btrfs_record_squota_delta(fs_info: trans->fs_info, delta); |
3016 | if (ret) { |
3017 | btrfs_abort_transaction(trans, ret); |
3018 | return ret; |
3019 | } |
3020 | |
3021 | ret = add_to_free_space_tree(trans, start: bytenr, size: num_bytes); |
3022 | if (ret) { |
3023 | btrfs_abort_transaction(trans, ret); |
3024 | return ret; |
3025 | } |
3026 | |
3027 | ret = btrfs_update_block_group(trans, bytenr, num_bytes, alloc: false); |
3028 | if (ret) |
3029 | btrfs_abort_transaction(trans, ret); |
3030 | |
3031 | return ret; |
3032 | } |
3033 | |
3034 | #define abort_and_dump(trans, path, fmt, args...) \ |
3035 | ({ \ |
3036 | btrfs_abort_transaction(trans, -EUCLEAN); \ |
3037 | btrfs_print_leaf(path->nodes[0]); \ |
3038 | btrfs_crit(trans->fs_info, fmt, ##args); \ |
3039 | }) |
3040 | |
3041 | /* |
3042 | * Drop one or more refs of @node. |
3043 | * |
3044 | * 1. Locate the extent refs. |
3045 | * It's either inline in the EXTENT/METADATA_ITEM or in a keyed SHARED_* item. |
3046 | * Locate it, then reduce the ref count or remove the backref completely. |
3047 | * |
3048 | * 2. Update the refs count in EXTENT/METADATA_ITEM |
3049 | * |
3050 | * Inline backref case: |
3051 | * |
3052 | * in extent tree we have: |
3053 | * |
3054 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 |
3055 | * refs 2 gen 6 flags DATA |
3056 | * extent data backref root FS_TREE objectid 258 offset 0 count 1 |
3057 | * extent data backref root FS_TREE objectid 257 offset 0 count 1 |
3058 | * |
3059 | * This function gets called with: |
3060 | * |
3061 | * node->bytenr = 13631488 |
3062 | * node->num_bytes = 1048576 |
3063 | * root_objectid = FS_TREE |
3064 | * owner_objectid = 257 |
3065 | * owner_offset = 0 |
3066 | * refs_to_drop = 1 |
3067 | * |
3068 | * Then we should get something like: |
3069 | * |
3070 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 |
3071 | * refs 1 gen 6 flags DATA |
3072 | * extent data backref root FS_TREE objectid 258 offset 0 count 1 |
3073 | * |
3074 | * Keyed backref case: |
3075 | * |
3076 | * in extent tree we have: |
3077 | * |
3078 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 |
3079 | * refs 754 gen 6 flags DATA |
3080 | * [...] |
3081 | * item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28 |
3082 | * extent data backref root FS_TREE objectid 866 offset 0 count 1 |
3083 | * |
3084 | * This function gets called with: |
3085 | * |
3086 | * node->bytenr = 13631488 |
3087 | * node->num_bytes = 1048576 |
3088 | * root_objectid = FS_TREE |
3089 | * owner_objectid = 866 |
3090 | * owner_offset = 0 |
3091 | * refs_to_drop = 1 |
3092 | * |
3093 | * Then we should get something like: |
3094 | * |
3095 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 |
3096 | * refs 753 gen 6 flags DATA |
3097 | * |
3098 | * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed. |
3099 | */ |
3100 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, |
3101 | struct btrfs_delayed_ref_head *href, |
3102 | struct btrfs_delayed_ref_node *node, u64 parent, |
3103 | u64 root_objectid, u64 owner_objectid, |
3104 | u64 owner_offset, |
3105 | struct btrfs_delayed_extent_op *extent_op) |
3106 | { |
3107 | struct btrfs_fs_info *info = trans->fs_info; |
3108 | struct btrfs_key key; |
3109 | struct btrfs_path *path; |
3110 | struct btrfs_root *extent_root; |
3111 | struct extent_buffer *leaf; |
3112 | struct btrfs_extent_item *ei; |
3113 | struct btrfs_extent_inline_ref *iref; |
3114 | int ret; |
3115 | int is_data; |
3116 | int extent_slot = 0; |
3117 | int found_extent = 0; |
3118 | int num_to_del = 1; |
3119 | int refs_to_drop = node->ref_mod; |
3120 | u32 item_size; |
3121 | u64 refs; |
3122 | u64 bytenr = node->bytenr; |
3123 | u64 num_bytes = node->num_bytes; |
3124 | bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA); |
3125 | u64 delayed_ref_root = href->owning_root; |
3126 | |
3127 | extent_root = btrfs_extent_root(fs_info: info, bytenr); |
3128 | ASSERT(extent_root); |
3129 | |
3130 | path = btrfs_alloc_path(); |
3131 | if (!path) |
3132 | return -ENOMEM; |
3133 | |
3134 | is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; |
3135 | |
3136 | if (!is_data && refs_to_drop != 1) { |
3137 | btrfs_crit(info, |
3138 | "invalid refs_to_drop, dropping more than 1 ref for tree block %llu refs_to_drop %u", |
3139 | node->bytenr, refs_to_drop); |
3140 | ret = -EINVAL; |
3141 | btrfs_abort_transaction(trans, ret); |
3142 | goto out; |
3143 | } |
3144 | |
3145 | if (is_data) |
3146 | skinny_metadata = false; |
3147 | |
3148 | ret = lookup_extent_backref(trans, path, ref_ret: &iref, bytenr, num_bytes, |
3149 | parent, root_objectid, owner: owner_objectid, |
3150 | offset: owner_offset); |
3151 | if (ret == 0) { |
3152 | /* |
3153 | * Either the inline backref or the SHARED_DATA_REF/ |
3154 | * SHARED_BLOCK_REF is found |
3155 | * |
3156 | * Here is a quick path to locate EXTENT/METADATA_ITEM. |
3157 | * It's possible the EXTENT/METADATA_ITEM is near current slot. |
3158 | */ |
3159 | extent_slot = path->slots[0]; |
3160 | while (extent_slot >= 0) { |
3161 | btrfs_item_key_to_cpu(eb: path->nodes[0], cpu_key: &key, |
3162 | nr: extent_slot); |
3163 | if (key.objectid != bytenr) |
3164 | break; |
3165 | if (key.type == BTRFS_EXTENT_ITEM_KEY && |
3166 | key.offset == num_bytes) { |
3167 | found_extent = 1; |
3168 | break; |
3169 | } |
3170 | if (key.type == BTRFS_METADATA_ITEM_KEY && |
3171 | key.offset == owner_objectid) { |
3172 | found_extent = 1; |
3173 | break; |
3174 | } |
3175 | |
3176 | /* Quick path didn't find the EXTENT/METADATA_ITEM */ |
3177 | if (path->slots[0] - extent_slot > 5) |
3178 | break; |
3179 | extent_slot--; |
3180 | } |
3181 | |
3182 | if (!found_extent) { |
3183 | if (iref) { |
3184 | abort_and_dump(trans, path, |
3185 | "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref", |
3186 | path->slots[0]); |
3187 | ret = -EUCLEAN; |
3188 | goto out; |
3189 | } |
3190 | /* Must be SHARED_* item, remove the backref first */ |
3191 | ret = remove_extent_backref(trans, root: extent_root, path, |
3192 | NULL, refs_to_drop, is_data); |
3193 | if (ret) { |
3194 | btrfs_abort_transaction(trans, ret); |
3195 | goto out; |
3196 | } |
3197 | btrfs_release_path(p: path); |
3198 | |
3199 | /* Slow path to locate EXTENT/METADATA_ITEM */ |
3200 | key.objectid = bytenr; |
3201 | key.type = BTRFS_EXTENT_ITEM_KEY; |
3202 | key.offset = num_bytes; |
3203 | |
3204 | if (!is_data && skinny_metadata) { |
3205 | key.type = BTRFS_METADATA_ITEM_KEY; |
3206 | key.offset = owner_objectid; |
3207 | } |
3208 | |
			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
3211 | if (ret > 0 && skinny_metadata && path->slots[0]) { |
3212 | /* |
3213 | * Couldn't find our skinny metadata item, |
3214 | * see if we have ye olde extent item. |
3215 | */ |
3216 | path->slots[0]--; |
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
3219 | if (key.objectid == bytenr && |
3220 | key.type == BTRFS_EXTENT_ITEM_KEY && |
3221 | key.offset == num_bytes) |
3222 | ret = 0; |
3223 | } |
3224 | |
3225 | if (ret > 0 && skinny_metadata) { |
3226 | skinny_metadata = false; |
3227 | key.objectid = bytenr; |
3228 | key.type = BTRFS_EXTENT_ITEM_KEY; |
3229 | key.offset = num_bytes; |
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
3233 | } |
3234 | |
3235 | if (ret) { |
3236 | if (ret > 0) |
					btrfs_print_leaf(path->nodes[0]);
				btrfs_err(info,
			"umm, got %d back from search, was looking for %llu, slot %d",
3240 | ret, bytenr, path->slots[0]); |
3241 | } |
3242 | if (ret < 0) { |
3243 | btrfs_abort_transaction(trans, ret); |
3244 | goto out; |
3245 | } |
3246 | extent_slot = path->slots[0]; |
3247 | } |
3248 | } else if (WARN_ON(ret == -ENOENT)) { |
3249 | abort_and_dump(trans, path, |
3250 | "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d" , |
3251 | bytenr, parent, root_objectid, owner_objectid, |
3252 | owner_offset, path->slots[0]); |
3253 | goto out; |
3254 | } else { |
3255 | btrfs_abort_transaction(trans, ret); |
3256 | goto out; |
3257 | } |
3258 | |
3259 | leaf = path->nodes[0]; |
	item_size = btrfs_item_size(leaf, extent_slot);
3261 | if (unlikely(item_size < sizeof(*ei))) { |
3262 | ret = -EUCLEAN; |
3263 | btrfs_err(trans->fs_info, |
3264 | "unexpected extent item size, has %u expect >= %zu" , |
3265 | item_size, sizeof(*ei)); |
3266 | btrfs_abort_transaction(trans, ret); |
3267 | goto out; |
3268 | } |
3269 | ei = btrfs_item_ptr(leaf, extent_slot, |
3270 | struct btrfs_extent_item); |
3271 | if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && |
3272 | key.type == BTRFS_EXTENT_ITEM_KEY) { |
3273 | struct btrfs_tree_block_info *bi; |
3274 | |
3275 | if (item_size < sizeof(*ei) + sizeof(*bi)) { |
3276 | abort_and_dump(trans, path, |
3277 | "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu" , |
3278 | key.objectid, key.type, key.offset, |
3279 | path->slots[0], owner_objectid, item_size, |
3280 | sizeof(*ei) + sizeof(*bi)); |
3281 | ret = -EUCLEAN; |
3282 | goto out; |
3283 | } |
3284 | bi = (struct btrfs_tree_block_info *)(ei + 1); |
3285 | WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); |
3286 | } |
3287 | |
	refs = btrfs_extent_refs(leaf, ei);
3289 | if (refs < refs_to_drop) { |
3290 | abort_and_dump(trans, path, |
3291 | "trying to drop %d refs but we only have %llu for bytenr %llu slot %u" , |
3292 | refs_to_drop, refs, bytenr, path->slots[0]); |
3293 | ret = -EUCLEAN; |
3294 | goto out; |
3295 | } |
3296 | refs -= refs_to_drop; |
3297 | |
3298 | if (refs > 0) { |
3299 | if (extent_op) |
3300 | __run_delayed_extent_op(extent_op, leaf, ei); |
3301 | /* |
3302 | * In the case of inline back ref, reference count will |
3303 | * be updated by remove_extent_backref |
3304 | */ |
3305 | if (iref) { |
3306 | if (!found_extent) { |
3307 | abort_and_dump(trans, path, |
3308 | "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u" , |
3309 | path->slots[0]); |
3310 | ret = -EUCLEAN; |
3311 | goto out; |
3312 | } |
3313 | } else { |
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(trans, leaf);
3316 | } |
3317 | if (found_extent) { |
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop, is_data);
3320 | if (ret) { |
3321 | btrfs_abort_transaction(trans, ret); |
3322 | goto out; |
3323 | } |
3324 | } |
3325 | } else { |
3326 | struct btrfs_squota_delta delta = { |
3327 | .root = delayed_ref_root, |
3328 | .num_bytes = num_bytes, |
3329 | .is_data = is_data, |
3330 | .is_inc = false, |
			.generation = btrfs_extent_generation(leaf, ei),
3332 | }; |
3333 | |
3334 | /* In this branch refs == 1 */ |
3335 | if (found_extent) { |
3336 | if (is_data && refs_to_drop != |
3337 | extent_data_ref_count(path, iref)) { |
3338 | abort_and_dump(trans, path, |
3339 | "invalid refs_to_drop, current refs %u refs_to_drop %u slot %u" , |
3340 | extent_data_ref_count(path, iref), |
3341 | refs_to_drop, path->slots[0]); |
3342 | ret = -EUCLEAN; |
3343 | goto out; |
3344 | } |
3345 | if (iref) { |
3346 | if (path->slots[0] != extent_slot) { |
3347 | abort_and_dump(trans, path, |
3348 | "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref" , |
3349 | key.objectid, key.type, |
3350 | key.offset, path->slots[0]); |
3351 | ret = -EUCLEAN; |
3352 | goto out; |
3353 | } |
3354 | } else { |
3355 | /* |
				 * No inline ref, we must be at a SHARED_* item,
				 * and since it's a single ref it must be:
3358 | * | extent_slot ||extent_slot + 1| |
3359 | * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ] |
3360 | */ |
3361 | if (path->slots[0] != extent_slot + 1) { |
3362 | abort_and_dump(trans, path, |
3363 | "invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM" , |
3364 | path->slots[0]); |
3365 | ret = -EUCLEAN; |
3366 | goto out; |
3367 | } |
3368 | path->slots[0] = extent_slot; |
3369 | num_to_del = 2; |
3370 | } |
3371 | } |
3372 | /* |
3373 | * We can't infer the data owner from the delayed ref, so we need |
3374 | * to try to get it from the owning ref item. |
3375 | * |
3376 | * If it is not present, then that extent was not written under |
3377 | * simple quotas mode, so we don't need to account for its deletion. |
3378 | */ |
3379 | if (is_data) |
			delta.root = btrfs_get_extent_owner_root(trans->fs_info,
								 leaf, extent_slot);
3382 | |
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
3385 | if (ret) { |
3386 | btrfs_abort_transaction(trans, ret); |
3387 | goto out; |
3388 | } |
		btrfs_release_path(path);
3390 | |
		ret = do_free_extent_accounting(trans, bytenr, &delta);
3392 | } |
	btrfs_release_path(path);
3394 | |
3395 | out: |
	btrfs_free_path(path);
3397 | return ret; |
3398 | } |
3399 | |
3400 | /* |
 * when we free a block, it is possible (and likely) that we free the last
3402 | * delayed ref for that extent as well. This searches the delayed ref tree for |
3403 | * a given extent, and if there are no other delayed refs to be processed, it |
3404 | * removes it from the tree. |
3405 | */ |
3406 | static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, |
3407 | u64 bytenr) |
3408 | { |
3409 | struct btrfs_delayed_ref_head *head; |
3410 | struct btrfs_delayed_ref_root *delayed_refs; |
3411 | int ret = 0; |
3412 | |
3413 | delayed_refs = &trans->transaction->delayed_refs; |
3414 | spin_lock(lock: &delayed_refs->lock); |
3415 | head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); |
3416 | if (!head) |
3417 | goto out_delayed_unlock; |
3418 | |
3419 | spin_lock(lock: &head->lock); |
3420 | if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root)) |
3421 | goto out; |
3422 | |
3423 | if (cleanup_extent_op(head) != NULL) |
3424 | goto out; |
3425 | |
3426 | /* |
3427 | * waiting for the lock here would deadlock. If someone else has it |
3428 | * locked they are already in the process of dropping it anyway |
3429 | */ |
	if (!mutex_trylock(&head->mutex))
3431 | goto out; |
3432 | |
3433 | btrfs_delete_ref_head(delayed_refs, head); |
3434 | head->processing = false; |
3435 | |
3436 | spin_unlock(lock: &head->lock); |
3437 | spin_unlock(lock: &delayed_refs->lock); |
3438 | |
3439 | BUG_ON(head->extent_op); |
3440 | if (head->must_insert_reserved) |
3441 | ret = 1; |
3442 | |
	btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
	mutex_unlock(&head->mutex);
3445 | btrfs_put_delayed_ref_head(head); |
3446 | return ret; |
3447 | out: |
3448 | spin_unlock(lock: &head->lock); |
3449 | |
3450 | out_delayed_unlock: |
3451 | spin_unlock(lock: &delayed_refs->lock); |
3452 | return 0; |
3453 | } |
3454 | |
3455 | void btrfs_free_tree_block(struct btrfs_trans_handle *trans, |
3456 | u64 root_id, |
3457 | struct extent_buffer *buf, |
3458 | u64 parent, int last_ref) |
3459 | { |
3460 | struct btrfs_fs_info *fs_info = trans->fs_info; |
3461 | struct btrfs_block_group *bg; |
3462 | int ret; |
3463 | |
3464 | if (root_id != BTRFS_TREE_LOG_OBJECTID) { |
3465 | struct btrfs_ref generic_ref = { 0 }; |
3466 | |
3467 | /* |
3468 | * Assert that the extent buffer is not cleared due to |
		 * EXTENT_BUFFER_ZONED_ZEROOUT. Please refer to
		 * btrfs_clear_buffer_dirty() and btree_csum_one_bio() for
		 * details.
3472 | */ |
3473 | ASSERT(btrfs_header_bytenr(buf) != 0); |
3474 | |
		btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
				       buf->start, buf->len, parent,
				       btrfs_header_owner(buf));
		btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
				    root_id, 0, false);
		btrfs_ref_tree_mod(fs_info, &generic_ref);
		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
3482 | BUG_ON(ret); /* -ENOMEM */ |
3483 | } |
3484 | |
3485 | if (!last_ref) |
3486 | return; |
3487 | |
	if (btrfs_header_generation(buf) != trans->transid)
3489 | goto out; |
3490 | |
3491 | if (root_id != BTRFS_TREE_LOG_OBJECTID) { |
		ret = check_ref_cleanup(trans, buf->start);
3493 | if (!ret) |
3494 | goto out; |
3495 | } |
3496 | |
	bg = btrfs_lookup_block_group(fs_info, buf->start);
3498 | |
	if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
		pin_down_extent(trans, bg, buf->start, buf->len, 1);
		btrfs_put_block_group(bg);
3502 | goto out; |
3503 | } |
3504 | |
3505 | /* |
3506 | * If there are tree mod log users we may have recorded mod log |
3507 | * operations for this node. If we re-allocate this node we |
3508 | * could replay operations on this node that happened when it |
3509 | * existed in a completely different root. For example if it |
3510 | * was part of root A, then was reallocated to root B, and we |
	 * are doing a btrfs_old_search_slot(root B), we could replay
3512 | * operations that happened when the block was part of root A, |
3513 | * giving us an inconsistent view of the btree. |
3514 | * |
3515 | * We are safe from races here because at this point no other |
3516 | * node or root points to this extent buffer, so if after this |
3517 | * check a new tree mod log user joins we will not have an |
3518 | * existing log of operations on this node that we have to |
3519 | * contend with. |
3520 | */ |
3521 | |
	if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags) ||
	    btrfs_is_zoned(fs_info)) {
		pin_down_extent(trans, bg, buf->start, buf->len, 1);
		btrfs_put_block_group(bg);
3526 | goto out; |
3527 | } |
3528 | |
3529 | WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); |
3530 | |
	btrfs_add_free_space(bg, buf->start, buf->len);
	btrfs_free_reserved_bytes(bg, buf->len, 0);
	btrfs_put_block_group(bg);
	trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
3535 | |
3536 | out: |
3537 | |
3538 | /* |
3539 | * Deleting the buffer, clear the corrupt flag since it doesn't |
3540 | * matter anymore. |
3541 | */ |
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
3543 | } |
3544 | |
3545 | /* Can return -ENOMEM */ |
3546 | int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) |
3547 | { |
3548 | struct btrfs_fs_info *fs_info = trans->fs_info; |
3549 | int ret; |
3550 | |
3551 | if (btrfs_is_testing(fs_info)) |
3552 | return 0; |
3553 | |
3554 | /* |
3555 | * tree log blocks never actually go into the extent allocation |
3556 | * tree, just update pinning info and exit early. |
3557 | */ |
3558 | if ((ref->type == BTRFS_REF_METADATA && |
3559 | ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) || |
3560 | (ref->type == BTRFS_REF_DATA && |
3561 | ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) { |
		btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
3563 | ret = 0; |
3564 | } else if (ref->type == BTRFS_REF_METADATA) { |
		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
3566 | } else { |
		ret = btrfs_add_delayed_data_ref(trans, ref, 0);
3568 | } |
3569 | |
3570 | if (!((ref->type == BTRFS_REF_METADATA && |
3571 | ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) || |
3572 | (ref->type == BTRFS_REF_DATA && |
3573 | ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID))) |
		btrfs_ref_tree_mod(fs_info, ref);
3575 | |
3576 | return ret; |
3577 | } |
3578 | |
3579 | enum btrfs_loop_type { |
3580 | /* |
3581 | * Start caching block groups but do not wait for progress or for them |
3582 | * to be done. |
3583 | */ |
3584 | LOOP_CACHING_NOWAIT, |
3585 | |
3586 | /* |
3587 | * Wait for the block group free_space >= the space we're waiting for if |
3588 | * the block group isn't cached. |
3589 | */ |
3590 | LOOP_CACHING_WAIT, |
3591 | |
3592 | /* |
3593 | * Allow allocations to happen from block groups that do not yet have a |
3594 | * size classification. |
3595 | */ |
3596 | LOOP_UNSET_SIZE_CLASS, |
3597 | |
3598 | /* |
3599 | * Allocate a chunk and then retry the allocation. |
3600 | */ |
3601 | LOOP_ALLOC_CHUNK, |
3602 | |
3603 | /* |
3604 | * Ignore the size class restrictions for this allocation. |
3605 | */ |
3606 | LOOP_WRONG_SIZE_CLASS, |
3607 | |
3608 | /* |
3609 | * Ignore the empty size, only try to allocate the number of bytes |
3610 | * needed for this allocation. |
3611 | */ |
3612 | LOOP_NO_EMPTY_SIZE, |
3613 | }; |
3614 | |
3615 | static inline void |
3616 | btrfs_lock_block_group(struct btrfs_block_group *cache, |
3617 | int delalloc) |
3618 | { |
3619 | if (delalloc) |
		down_read(&cache->data_rwsem);
3621 | } |
3622 | |
3623 | static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, |
3624 | int delalloc) |
3625 | { |
3626 | btrfs_get_block_group(cache); |
3627 | if (delalloc) |
		down_read(&cache->data_rwsem);
3629 | } |
3630 | |
3631 | static struct btrfs_block_group *btrfs_lock_cluster( |
3632 | struct btrfs_block_group *block_group, |
3633 | struct btrfs_free_cluster *cluster, |
3634 | int delalloc) |
3635 | __acquires(&cluster->refill_lock) |
3636 | { |
3637 | struct btrfs_block_group *used_bg = NULL; |
3638 | |
3639 | spin_lock(lock: &cluster->refill_lock); |
3640 | while (1) { |
3641 | used_bg = cluster->block_group; |
3642 | if (!used_bg) |
3643 | return NULL; |
3644 | |
3645 | if (used_bg == block_group) |
3646 | return used_bg; |
3647 | |
		btrfs_get_block_group(used_bg);
3649 | |
3650 | if (!delalloc) |
3651 | return used_bg; |
3652 | |
		if (down_read_trylock(&used_bg->data_rwsem))
3654 | return used_bg; |
3655 | |
3656 | spin_unlock(lock: &cluster->refill_lock); |
3657 | |
3658 | /* We should only have one-level nested. */ |
		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
3660 | |
3661 | spin_lock(lock: &cluster->refill_lock); |
3662 | if (used_bg == cluster->block_group) |
3663 | return used_bg; |
3664 | |
3665 | up_read(sem: &used_bg->data_rwsem); |
3666 | btrfs_put_block_group(cache: used_bg); |
3667 | } |
3668 | } |
3669 | |
3670 | static inline void |
3671 | btrfs_release_block_group(struct btrfs_block_group *cache, |
3672 | int delalloc) |
3673 | { |
3674 | if (delalloc) |
3675 | up_read(sem: &cache->data_rwsem); |
3676 | btrfs_put_block_group(cache); |
3677 | } |
3678 | |
3679 | /* |
3680 | * Helper function for find_free_extent(). |
3681 | * |
 * Return -ENOENT to inform the caller that we need to fall back to
 * unclustered mode.
 * Return >0 to inform the caller that we found nothing.
 * Return 0 when we have found a location and set ffe_ctl->found_offset.
3685 | */ |
3686 | static int find_free_extent_clustered(struct btrfs_block_group *bg, |
3687 | struct find_free_extent_ctl *ffe_ctl, |
3688 | struct btrfs_block_group **cluster_bg_ret) |
3689 | { |
3690 | struct btrfs_block_group *cluster_bg; |
3691 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
3692 | u64 aligned_cluster; |
3693 | u64 offset; |
3694 | int ret; |
3695 | |
	cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
3697 | if (!cluster_bg) |
3698 | goto refill_cluster; |
3699 | if (cluster_bg != bg && (cluster_bg->ro || |
3700 | !block_group_bits(cache: cluster_bg, bits: ffe_ctl->flags))) |
3701 | goto release_cluster; |
3702 | |
	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
					  ffe_ctl->num_bytes, cluster_bg->start,
					  &ffe_ctl->max_extent_size);
3706 | if (offset) { |
3707 | /* We have a block, we're done */ |
3708 | spin_unlock(lock: &last_ptr->refill_lock); |
3709 | trace_btrfs_reserve_extent_cluster(block_group: cluster_bg, ffe_ctl); |
3710 | *cluster_bg_ret = cluster_bg; |
3711 | ffe_ctl->found_offset = offset; |
3712 | return 0; |
3713 | } |
3714 | WARN_ON(last_ptr->block_group != cluster_bg); |
3715 | |
3716 | release_cluster: |
3717 | /* |
	 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
	 * let's just skip it and let the allocator find whatever block it can
3720 | * find. If we reach this point, we will have tried the cluster |
3721 | * allocator plenty of times and not have found anything, so we are |
3722 | * likely way too fragmented for the clustering stuff to find anything. |
3723 | * |
3724 | * However, if the cluster is taken from the current block group, |
3725 | * release the cluster first, so that we stand a better chance of |
3726 | * succeeding in the unclustered allocation. |
3727 | */ |
3728 | if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { |
3729 | spin_unlock(lock: &last_ptr->refill_lock); |
3730 | btrfs_release_block_group(cache: cluster_bg, delalloc: ffe_ctl->delalloc); |
3731 | return -ENOENT; |
3732 | } |
3733 | |
3734 | /* This cluster didn't work out, free it and start over */ |
	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3736 | |
3737 | if (cluster_bg != bg) |
		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3739 | |
3740 | refill_cluster: |
3741 | if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { |
3742 | spin_unlock(lock: &last_ptr->refill_lock); |
3743 | return -ENOENT; |
3744 | } |
3745 | |
3746 | aligned_cluster = max_t(u64, |
3747 | ffe_ctl->empty_cluster + ffe_ctl->empty_size, |
3748 | bg->full_stripe_len); |
	ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
				       ffe_ctl->num_bytes, aligned_cluster);
3751 | if (ret == 0) { |
3752 | /* Now pull our allocation out of this cluster */ |
		offset = btrfs_alloc_from_cluster(bg, last_ptr,
					ffe_ctl->num_bytes, ffe_ctl->search_start,
					&ffe_ctl->max_extent_size);
3756 | if (offset) { |
3757 | /* We found one, proceed */ |
3758 | spin_unlock(lock: &last_ptr->refill_lock); |
3759 | ffe_ctl->found_offset = offset; |
3760 | trace_btrfs_reserve_extent_cluster(block_group: bg, ffe_ctl); |
3761 | return 0; |
3762 | } |
3763 | } |
3764 | /* |
3765 | * At this point we either didn't find a cluster or we weren't able to |
3766 | * allocate a block from our cluster. Free the cluster we've been |
3767 | * trying to use, and go to the next block group. |
3768 | */ |
	btrfs_return_cluster_to_free_space(NULL, last_ptr);
	spin_unlock(&last_ptr->refill_lock);
3771 | return 1; |
3772 | } |
3773 | |
3774 | /* |
 * Return >0 to inform the caller that we found nothing.
 * Return 0 when we found a free extent and set ffe_ctl->found_offset.
3777 | */ |
3778 | static int find_free_extent_unclustered(struct btrfs_block_group *bg, |
3779 | struct find_free_extent_ctl *ffe_ctl) |
3780 | { |
3781 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
3782 | u64 offset; |
3783 | |
3784 | /* |
3785 | * We are doing an unclustered allocation, set the fragmented flag so |
3786 | * we don't bother trying to setup a cluster again until we get more |
3787 | * space. |
3788 | */ |
3789 | if (unlikely(last_ptr)) { |
3790 | spin_lock(lock: &last_ptr->lock); |
3791 | last_ptr->fragmented = 1; |
3792 | spin_unlock(lock: &last_ptr->lock); |
3793 | } |
3794 | if (ffe_ctl->cached) { |
3795 | struct btrfs_free_space_ctl *free_space_ctl; |
3796 | |
3797 | free_space_ctl = bg->free_space_ctl; |
3798 | spin_lock(lock: &free_space_ctl->tree_lock); |
3799 | if (free_space_ctl->free_space < |
3800 | ffe_ctl->num_bytes + ffe_ctl->empty_cluster + |
3801 | ffe_ctl->empty_size) { |
3802 | ffe_ctl->total_free_space = max_t(u64, |
3803 | ffe_ctl->total_free_space, |
3804 | free_space_ctl->free_space); |
3805 | spin_unlock(lock: &free_space_ctl->tree_lock); |
3806 | return 1; |
3807 | } |
3808 | spin_unlock(lock: &free_space_ctl->tree_lock); |
3809 | } |
3810 | |
	offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
					    ffe_ctl->num_bytes, ffe_ctl->empty_size,
					    &ffe_ctl->max_extent_size);
3814 | if (!offset) |
3815 | return 1; |
3816 | ffe_ctl->found_offset = offset; |
3817 | return 0; |
3818 | } |
3819 | |
3820 | static int do_allocation_clustered(struct btrfs_block_group *block_group, |
3821 | struct find_free_extent_ctl *ffe_ctl, |
3822 | struct btrfs_block_group **bg_ret) |
3823 | { |
3824 | int ret; |
3825 | |
3826 | /* We want to try and use the cluster allocator, so lets look there */ |
3827 | if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { |
		ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
3829 | if (ret >= 0) |
3830 | return ret; |
3831 | /* ret == -ENOENT case falls through */ |
3832 | } |
3833 | |
3834 | return find_free_extent_unclustered(bg: block_group, ffe_ctl); |
3835 | } |
3836 | |
3837 | /* |
3838 | * Tree-log block group locking |
3839 | * ============================ |
3840 | * |
3841 | * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which |
3842 | * indicates the starting address of a block group, which is reserved only |
3843 | * for tree-log metadata. |
3844 | * |
3845 | * Lock nesting |
3846 | * ============ |
3847 | * |
3848 | * space_info::lock |
3849 | * block_group::lock |
3850 | * fs_info::treelog_bg_lock |
3851 | */ |
3852 | |
3853 | /* |
3854 | * Simple allocator for sequential-only block group. It only allows sequential |
3855 | * allocation. No need to play with trees. This function also reserves the |
3856 | * bytes as in btrfs_add_reserved_bytes. |
3857 | */ |
3858 | static int do_allocation_zoned(struct btrfs_block_group *block_group, |
3859 | struct find_free_extent_ctl *ffe_ctl, |
3860 | struct btrfs_block_group **bg_ret) |
3861 | { |
3862 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
3863 | struct btrfs_space_info *space_info = block_group->space_info; |
3864 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
3865 | u64 start = block_group->start; |
3866 | u64 num_bytes = ffe_ctl->num_bytes; |
3867 | u64 avail; |
3868 | u64 bytenr = block_group->start; |
3869 | u64 log_bytenr; |
3870 | u64 data_reloc_bytenr; |
3871 | int ret = 0; |
3872 | bool skip = false; |
3873 | |
3874 | ASSERT(btrfs_is_zoned(block_group->fs_info)); |
3875 | |
3876 | /* |
3877 | * Do not allow non-tree-log blocks in the dedicated tree-log block |
3878 | * group, and vice versa. |
3879 | */ |
3880 | spin_lock(lock: &fs_info->treelog_bg_lock); |
3881 | log_bytenr = fs_info->treelog_bg; |
3882 | if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || |
3883 | (!ffe_ctl->for_treelog && bytenr == log_bytenr))) |
3884 | skip = true; |
3885 | spin_unlock(lock: &fs_info->treelog_bg_lock); |
3886 | if (skip) |
3887 | return 1; |
3888 | |
3889 | /* |
3890 | * Do not allow non-relocation blocks in the dedicated relocation block |
3891 | * group, and vice versa. |
3892 | */ |
3893 | spin_lock(lock: &fs_info->relocation_bg_lock); |
3894 | data_reloc_bytenr = fs_info->data_reloc_bg; |
3895 | if (data_reloc_bytenr && |
3896 | ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || |
3897 | (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) |
3898 | skip = true; |
3899 | spin_unlock(lock: &fs_info->relocation_bg_lock); |
3900 | if (skip) |
3901 | return 1; |
3902 | |
3903 | /* Check RO and no space case before trying to activate it */ |
3904 | spin_lock(lock: &block_group->lock); |
3905 | if (block_group->ro || btrfs_zoned_bg_is_full(bg: block_group)) { |
3906 | ret = 1; |
3907 | /* |
3908 | * May need to clear fs_info->{treelog,data_reloc}_bg. |
3909 | * Return the error after taking the locks. |
3910 | */ |
3911 | } |
3912 | spin_unlock(lock: &block_group->lock); |
3913 | |
3914 | /* Metadata block group is activated at write time. */ |
3915 | if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) && |
3916 | !btrfs_zone_activate(block_group)) { |
3917 | ret = 1; |
3918 | /* |
3919 | * May need to clear fs_info->{treelog,data_reloc}_bg. |
3920 | * Return the error after taking the locks. |
3921 | */ |
3922 | } |
3923 | |
3924 | spin_lock(lock: &space_info->lock); |
3925 | spin_lock(lock: &block_group->lock); |
3926 | spin_lock(lock: &fs_info->treelog_bg_lock); |
3927 | spin_lock(lock: &fs_info->relocation_bg_lock); |
3928 | |
3929 | if (ret) |
3930 | goto out; |
3931 | |
3932 | ASSERT(!ffe_ctl->for_treelog || |
3933 | block_group->start == fs_info->treelog_bg || |
3934 | fs_info->treelog_bg == 0); |
3935 | ASSERT(!ffe_ctl->for_data_reloc || |
3936 | block_group->start == fs_info->data_reloc_bg || |
3937 | fs_info->data_reloc_bg == 0); |
3938 | |
3939 | if (block_group->ro || |
3940 | (!ffe_ctl->for_data_reloc && |
3941 | test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) { |
3942 | ret = 1; |
3943 | goto out; |
3944 | } |
3945 | |
3946 | /* |
3947 | * Do not allow currently using block group to be tree-log dedicated |
3948 | * block group. |
3949 | */ |
3950 | if (ffe_ctl->for_treelog && !fs_info->treelog_bg && |
3951 | (block_group->used || block_group->reserved)) { |
3952 | ret = 1; |
3953 | goto out; |
3954 | } |
3955 | |
3956 | /* |
3957 | * Do not allow currently used block group to be the data relocation |
3958 | * dedicated block group. |
3959 | */ |
3960 | if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && |
3961 | (block_group->used || block_group->reserved)) { |
3962 | ret = 1; |
3963 | goto out; |
3964 | } |
3965 | |
3966 | WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); |
3967 | avail = block_group->zone_capacity - block_group->alloc_offset; |
3968 | if (avail < num_bytes) { |
3969 | if (ffe_ctl->max_extent_size < avail) { |
3970 | /* |
3971 | * With sequential allocator, free space is always |
3972 | * contiguous |
3973 | */ |
3974 | ffe_ctl->max_extent_size = avail; |
3975 | ffe_ctl->total_free_space = avail; |
3976 | } |
3977 | ret = 1; |
3978 | goto out; |
3979 | } |
3980 | |
3981 | if (ffe_ctl->for_treelog && !fs_info->treelog_bg) |
3982 | fs_info->treelog_bg = block_group->start; |
3983 | |
3984 | if (ffe_ctl->for_data_reloc) { |
3985 | if (!fs_info->data_reloc_bg) |
3986 | fs_info->data_reloc_bg = block_group->start; |
3987 | /* |
3988 | * Do not allow allocations from this block group, unless it is |
3989 | * for data relocation. Compared to increasing the ->ro, setting |
3990 | * the ->zoned_data_reloc_ongoing flag still allows nocow |
3991 | * writers to come in. See btrfs_inc_nocow_writers(). |
3992 | * |
3993 | * We need to disable an allocation to avoid an allocation of |
3994 | * regular (non-relocation data) extent. With mix of relocation |
3995 | * extents and regular extents, we can dispatch WRITE commands |
3996 | * (for relocation extents) and ZONE APPEND commands (for |
3997 | * regular extents) at the same time to the same zone, which |
3998 | * easily break the write pointer. |
3999 | * |
		 * Also, this flag prevents this block group from being zone
		 * finished.
4001 | */ |
		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
4003 | } |
4004 | |
4005 | ffe_ctl->found_offset = start + block_group->alloc_offset; |
4006 | block_group->alloc_offset += num_bytes; |
4007 | spin_lock(lock: &ctl->tree_lock); |
4008 | ctl->free_space -= num_bytes; |
4009 | spin_unlock(lock: &ctl->tree_lock); |
4010 | |
4011 | /* |
4012 | * We do not check if found_offset is aligned to stripesize. The |
4013 | * address is anyway rewritten when using zone append writing. |
4014 | */ |
4015 | |
4016 | ffe_ctl->search_start = ffe_ctl->found_offset; |
4017 | |
4018 | out: |
4019 | if (ret && ffe_ctl->for_treelog) |
4020 | fs_info->treelog_bg = 0; |
4021 | if (ret && ffe_ctl->for_data_reloc) |
4022 | fs_info->data_reloc_bg = 0; |
4023 | spin_unlock(lock: &fs_info->relocation_bg_lock); |
4024 | spin_unlock(lock: &fs_info->treelog_bg_lock); |
4025 | spin_unlock(lock: &block_group->lock); |
4026 | spin_unlock(lock: &space_info->lock); |
4027 | return ret; |
4028 | } |
4029 | |
4030 | static int do_allocation(struct btrfs_block_group *block_group, |
4031 | struct find_free_extent_ctl *ffe_ctl, |
4032 | struct btrfs_block_group **bg_ret) |
4033 | { |
4034 | switch (ffe_ctl->policy) { |
4035 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4036 | return do_allocation_clustered(block_group, ffe_ctl, bg_ret); |
4037 | case BTRFS_EXTENT_ALLOC_ZONED: |
4038 | return do_allocation_zoned(block_group, ffe_ctl, bg_ret); |
4039 | default: |
4040 | BUG(); |
4041 | } |
4042 | } |
4043 | |
4044 | static void release_block_group(struct btrfs_block_group *block_group, |
4045 | struct find_free_extent_ctl *ffe_ctl, |
4046 | int delalloc) |
4047 | { |
4048 | switch (ffe_ctl->policy) { |
4049 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4050 | ffe_ctl->retry_uncached = false; |
4051 | break; |
4052 | case BTRFS_EXTENT_ALLOC_ZONED: |
4053 | /* Nothing to do */ |
4054 | break; |
4055 | default: |
4056 | BUG(); |
4057 | } |
4058 | |
4059 | BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != |
4060 | ffe_ctl->index); |
	btrfs_release_block_group(block_group, delalloc);
4062 | } |
4063 | |
4064 | static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, |
4065 | struct btrfs_key *ins) |
4066 | { |
4067 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
4068 | |
4069 | if (!ffe_ctl->use_cluster && last_ptr) { |
4070 | spin_lock(lock: &last_ptr->lock); |
4071 | last_ptr->window_start = ins->objectid; |
4072 | spin_unlock(lock: &last_ptr->lock); |
4073 | } |
4074 | } |
4075 | |
4076 | static void found_extent(struct find_free_extent_ctl *ffe_ctl, |
4077 | struct btrfs_key *ins) |
4078 | { |
4079 | switch (ffe_ctl->policy) { |
4080 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4081 | found_extent_clustered(ffe_ctl, ins); |
4082 | break; |
4083 | case BTRFS_EXTENT_ALLOC_ZONED: |
4084 | /* Nothing to do */ |
4085 | break; |
4086 | default: |
4087 | BUG(); |
4088 | } |
4089 | } |
4090 | |
4091 | static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info, |
4092 | struct find_free_extent_ctl *ffe_ctl) |
4093 | { |
4094 | /* Block group's activeness is not a requirement for METADATA block groups. */ |
4095 | if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)) |
4096 | return 0; |
4097 | |
4098 | /* If we can activate new zone, just allocate a chunk and use it */ |
	if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
4100 | return 0; |
4101 | |
4102 | /* |
4103 | * We already reached the max active zones. Try to finish one block |
	 * group to make room for a new block group. This is only possible
4105 | * for a data block group because btrfs_zone_finish() may need to wait |
4106 | * for a running transaction which can cause a deadlock for metadata |
4107 | * allocation. |
4108 | */ |
4109 | if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { |
4110 | int ret = btrfs_zone_finish_one_bg(fs_info); |
4111 | |
4112 | if (ret == 1) |
4113 | return 0; |
4114 | else if (ret < 0) |
4115 | return ret; |
4116 | } |
4117 | |
4118 | /* |
4119 | * If we have enough free space left in an already active block group |
4120 | * and we can't activate any other zone now, do not allow allocating a |
4121 | * new chunk and let find_free_extent() retry with a smaller size. |
4122 | */ |
4123 | if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) |
4124 | return -ENOSPC; |
4125 | |
4126 | /* |
	 * Not even min_alloc_size is left in any block group. Since we cannot
	 * activate a new block group, allocating one may not help. Let's tell
	 * the caller to try again and hope it makes progress by writing some
4130 | * parts of the region. That is only possible for data block groups, |
4131 | * where a part of the region can be written. |
4132 | */ |
4133 | if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) |
4134 | return -EAGAIN; |
4135 | |
4136 | /* |
	 * We cannot activate a new block group and there is not enough space
	 * left in any block group, so allocating a new block group may not
	 * help. But there is nothing to do anyway, so let's go with it.
4140 | */ |
4141 | return 0; |
4142 | } |
4143 | |
4144 | static int can_allocate_chunk(struct btrfs_fs_info *fs_info, |
4145 | struct find_free_extent_ctl *ffe_ctl) |
4146 | { |
4147 | switch (ffe_ctl->policy) { |
4148 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4149 | return 0; |
4150 | case BTRFS_EXTENT_ALLOC_ZONED: |
4151 | return can_allocate_chunk_zoned(fs_info, ffe_ctl); |
4152 | default: |
4153 | BUG(); |
4154 | } |
4155 | } |
4156 | |
4157 | /* |
4158 | * Return >0 means caller needs to re-search for free extent |
4159 | * Return 0 means we have the needed free extent. |
4160 | * Return <0 means we failed to locate any free extent. |
4161 | */ |
4162 | static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, |
4163 | struct btrfs_key *ins, |
4164 | struct find_free_extent_ctl *ffe_ctl, |
4165 | bool full_search) |
4166 | { |
4167 | struct btrfs_root *root = fs_info->chunk_root; |
4168 | int ret; |
4169 | |
4170 | if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && |
4171 | ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) |
4172 | ffe_ctl->orig_have_caching_bg = true; |
4173 | |
4174 | if (ins->objectid) { |
4175 | found_extent(ffe_ctl, ins); |
4176 | return 0; |
4177 | } |
4178 | |
4179 | if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) |
4180 | return 1; |
4181 | |
4182 | ffe_ctl->index++; |
4183 | if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) |
4184 | return 1; |
4185 | |
4186 | /* See the comments for btrfs_loop_type for an explanation of the phases. */ |
4187 | if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { |
4188 | ffe_ctl->index = 0; |
4189 | /* |
4190 | * We want to skip the LOOP_CACHING_WAIT step if we don't have |
4191 | * any uncached bgs and we've already done a full search |
4192 | * through. |
4193 | */ |
4194 | if (ffe_ctl->loop == LOOP_CACHING_NOWAIT && |
4195 | (!ffe_ctl->orig_have_caching_bg && full_search)) |
4196 | ffe_ctl->loop++; |
4197 | ffe_ctl->loop++; |
4198 | |
4199 | if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { |
4200 | struct btrfs_trans_handle *trans; |
4201 | int exist = 0; |
4202 | |
4203 | /* Check if allocation policy allows to create a new chunk */ |
4204 | ret = can_allocate_chunk(fs_info, ffe_ctl); |
4205 | if (ret) |
4206 | return ret; |
4207 | |
4208 | trans = current->journal_info; |
4209 | if (trans) |
4210 | exist = 1; |
4211 | else |
4212 | trans = btrfs_join_transaction(root); |
4213 | |
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
4216 | return ret; |
4217 | } |
4218 | |
			ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
						CHUNK_ALLOC_FORCE_FOR_EXTENT);
4221 | |
4222 | /* Do not bail out on ENOSPC since we can do more. */ |
4223 | if (ret == -ENOSPC) { |
4224 | ret = 0; |
4225 | ffe_ctl->loop++; |
4226 | } |
4227 | else if (ret < 0) |
4228 | btrfs_abort_transaction(trans, ret); |
4229 | else |
4230 | ret = 0; |
4231 | if (!exist) |
4232 | btrfs_end_transaction(trans); |
4233 | if (ret) |
4234 | return ret; |
4235 | } |
4236 | |
4237 | if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { |
4238 | if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) |
4239 | return -ENOSPC; |
4240 | |
4241 | /* |
4242 | * Don't loop again if we already have no empty_size and |
4243 | * no empty_cluster. |
4244 | */ |
4245 | if (ffe_ctl->empty_size == 0 && |
4246 | ffe_ctl->empty_cluster == 0) |
4247 | return -ENOSPC; |
4248 | ffe_ctl->empty_size = 0; |
4249 | ffe_ctl->empty_cluster = 0; |
4250 | } |
4251 | return 1; |
4252 | } |
4253 | return -ENOSPC; |
4254 | } |
4255 | |
4256 | static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, |
4257 | struct btrfs_block_group *bg) |
4258 | { |
4259 | if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) |
4260 | return true; |
4261 | if (!btrfs_block_group_should_use_size_class(bg)) |
4262 | return true; |
4263 | if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) |
4264 | return true; |
4265 | if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && |
4266 | bg->size_class == BTRFS_BG_SZ_NONE) |
4267 | return true; |
4268 | return ffe_ctl->size_class == bg->size_class; |
4269 | } |
4270 | |
4271 | static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, |
4272 | struct find_free_extent_ctl *ffe_ctl, |
4273 | struct btrfs_space_info *space_info, |
4274 | struct btrfs_key *ins) |
4275 | { |
4276 | /* |
4277 | * If our free space is heavily fragmented we may not be able to make |
4278 | * big contiguous allocations, so instead of doing the expensive search |
4279 | * for free space, simply return ENOSPC with our max_extent_size so we |
4280 | * can go ahead and search for a more manageable chunk. |
4281 | * |
4282 | * If our max_extent_size is large enough for our allocation simply |
4283 | * disable clustering since we will likely not be able to find enough |
4284 | * space to create a cluster and induce latency trying. |
4285 | */ |
4286 | if (space_info->max_extent_size) { |
4287 | spin_lock(lock: &space_info->lock); |
4288 | if (space_info->max_extent_size && |
4289 | ffe_ctl->num_bytes > space_info->max_extent_size) { |
4290 | ins->offset = space_info->max_extent_size; |
4291 | spin_unlock(lock: &space_info->lock); |
4292 | return -ENOSPC; |
4293 | } else if (space_info->max_extent_size) { |
4294 | ffe_ctl->use_cluster = false; |
4295 | } |
4296 | spin_unlock(lock: &space_info->lock); |
4297 | } |
4298 | |
	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
					       &ffe_ctl->empty_cluster);
4301 | if (ffe_ctl->last_ptr) { |
4302 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
4303 | |
4304 | spin_lock(lock: &last_ptr->lock); |
4305 | if (last_ptr->block_group) |
4306 | ffe_ctl->hint_byte = last_ptr->window_start; |
4307 | if (last_ptr->fragmented) { |
4308 | /* |
4309 | * We still set window_start so we can keep track of the |
4310 | * last place we found an allocation to try and save |
4311 | * some time. |
4312 | */ |
4313 | ffe_ctl->hint_byte = last_ptr->window_start; |
4314 | ffe_ctl->use_cluster = false; |
4315 | } |
4316 | spin_unlock(lock: &last_ptr->lock); |
4317 | } |
4318 | |
4319 | return 0; |
4320 | } |
4321 | |
4322 | static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info, |
4323 | struct find_free_extent_ctl *ffe_ctl) |
4324 | { |
4325 | if (ffe_ctl->for_treelog) { |
4326 | spin_lock(lock: &fs_info->treelog_bg_lock); |
4327 | if (fs_info->treelog_bg) |
4328 | ffe_ctl->hint_byte = fs_info->treelog_bg; |
4329 | spin_unlock(lock: &fs_info->treelog_bg_lock); |
4330 | } else if (ffe_ctl->for_data_reloc) { |
4331 | spin_lock(lock: &fs_info->relocation_bg_lock); |
4332 | if (fs_info->data_reloc_bg) |
4333 | ffe_ctl->hint_byte = fs_info->data_reloc_bg; |
4334 | spin_unlock(lock: &fs_info->relocation_bg_lock); |
4335 | } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { |
4336 | struct btrfs_block_group *block_group; |
4337 | |
4338 | spin_lock(lock: &fs_info->zone_active_bgs_lock); |
4339 | list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) { |
4340 | /* |
			 * No lock is OK here because avail is monotonically
4342 | * decreasing, and this is just a hint. |
4343 | */ |
4344 | u64 avail = block_group->zone_capacity - block_group->alloc_offset; |
4345 | |
			if (block_group_bits(block_group, ffe_ctl->flags) &&
4347 | avail >= ffe_ctl->num_bytes) { |
4348 | ffe_ctl->hint_byte = block_group->start; |
4349 | break; |
4350 | } |
4351 | } |
4352 | spin_unlock(lock: &fs_info->zone_active_bgs_lock); |
4353 | } |
4354 | |
4355 | return 0; |
4356 | } |
4357 | |
4358 | static int prepare_allocation(struct btrfs_fs_info *fs_info, |
4359 | struct find_free_extent_ctl *ffe_ctl, |
4360 | struct btrfs_space_info *space_info, |
4361 | struct btrfs_key *ins) |
4362 | { |
4363 | switch (ffe_ctl->policy) { |
4364 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4365 | return prepare_allocation_clustered(fs_info, ffe_ctl, |
4366 | space_info, ins); |
4367 | case BTRFS_EXTENT_ALLOC_ZONED: |
4368 | return prepare_allocation_zoned(fs_info, ffe_ctl); |
4369 | default: |
4370 | BUG(); |
4371 | } |
4372 | } |
4373 | |
4374 | /* |
 * walks the btree of allocated extents and finds a hole of a given size.
4376 | * The key ins is changed to record the hole: |
4377 | * ins->objectid == start position |
4378 | * ins->flags = BTRFS_EXTENT_ITEM_KEY |
4379 | * ins->offset == the size of the hole. |
4380 | * Any available blocks before search_start are skipped. |
4381 | * |
4382 | * If there is no suitable free space, we will record the max size of |
4383 | * the free space extent currently. |
4384 | * |
4385 | * The overall logic and call chain: |
4386 | * |
4387 | * find_free_extent() |
4388 | * |- Iterate through all block groups |
4389 | * | |- Get a valid block group |
4390 | * | |- Try to do clustered allocation in that block group |
4391 | * | |- Try to do unclustered allocation in that block group |
4392 | * | |- Check if the result is valid |
4393 | * | | |- If valid, then exit |
4394 | * | |- Jump to next block group |
4395 | * | |
4396 | * |- Push harder to find free extents |
4397 | * |- If not found, re-iterate all block groups |
4398 | */ |
4399 | static noinline int find_free_extent(struct btrfs_root *root, |
4400 | struct btrfs_key *ins, |
4401 | struct find_free_extent_ctl *ffe_ctl) |
4402 | { |
4403 | struct btrfs_fs_info *fs_info = root->fs_info; |
4404 | int ret = 0; |
4405 | int cache_block_group_error = 0; |
4406 | struct btrfs_block_group *block_group = NULL; |
4407 | struct btrfs_space_info *space_info; |
4408 | bool full_search = false; |
4409 | |
4410 | WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); |
4411 | |
4412 | ffe_ctl->search_start = 0; |
4413 | /* For clustered allocation */ |
4414 | ffe_ctl->empty_cluster = 0; |
4415 | ffe_ctl->last_ptr = NULL; |
4416 | ffe_ctl->use_cluster = true; |
4417 | ffe_ctl->have_caching_bg = false; |
4418 | ffe_ctl->orig_have_caching_bg = false; |
	ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
4420 | ffe_ctl->loop = 0; |
4421 | ffe_ctl->retry_uncached = false; |
4422 | ffe_ctl->cached = 0; |
4423 | ffe_ctl->max_extent_size = 0; |
4424 | ffe_ctl->total_free_space = 0; |
4425 | ffe_ctl->found_offset = 0; |
4426 | ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; |
	ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes);
4428 | |
4429 | if (btrfs_is_zoned(fs_info)) |
4430 | ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; |
4431 | |
4432 | ins->type = BTRFS_EXTENT_ITEM_KEY; |
4433 | ins->objectid = 0; |
4434 | ins->offset = 0; |
4435 | |
4436 | trace_find_free_extent(root, ffe_ctl); |
4437 | |
	space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags);
	if (!space_info) {
		btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags);
4441 | return -ENOSPC; |
4442 | } |
4443 | |
4444 | ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins); |
4445 | if (ret < 0) |
4446 | return ret; |
4447 | |
4448 | ffe_ctl->search_start = max(ffe_ctl->search_start, |
4449 | first_logical_byte(fs_info)); |
4450 | ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); |
4451 | if (ffe_ctl->search_start == ffe_ctl->hint_byte) { |
		block_group = btrfs_lookup_block_group(fs_info,
						       ffe_ctl->search_start);
4454 | /* |
4455 | * we don't want to use the block group if it doesn't match our |
		 * allocation bits, or if it's not cached.
4457 | * |
4458 | * However if we are re-searching with an ideal block group |
4459 | * picked out then we don't care that the block group is cached. |
4460 | */ |
4461 | if (block_group && block_group_bits(cache: block_group, bits: ffe_ctl->flags) && |
4462 | block_group->cached != BTRFS_CACHE_NO) { |
4463 | down_read(sem: &space_info->groups_sem); |
4464 | if (list_empty(head: &block_group->list) || |
4465 | block_group->ro) { |
4466 | /* |
4467 | * someone is removing this block group, |
4468 | * we can't jump into the have_block_group |
4469 | * target because our list pointers are not |
4470 | * valid |
4471 | */ |
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
4474 | } else { |
				ffe_ctl->index = btrfs_bg_flags_to_raid_index(
						block_group->flags);
				btrfs_lock_block_group(block_group,
						       ffe_ctl->delalloc);
4479 | ffe_ctl->hinted = true; |
4480 | goto have_block_group; |
4481 | } |
4482 | } else if (block_group) { |
			btrfs_put_block_group(block_group);
4484 | } |
4485 | } |
4486 | search: |
4487 | trace_find_free_extent_search_loop(root, ffe_ctl); |
4488 | ffe_ctl->have_caching_bg = false; |
	if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) ||
4490 | ffe_ctl->index == 0) |
4491 | full_search = true; |
4492 | down_read(sem: &space_info->groups_sem); |
4493 | list_for_each_entry(block_group, |
4494 | &space_info->block_groups[ffe_ctl->index], list) { |
4495 | struct btrfs_block_group *bg_ret; |
4496 | |
4497 | ffe_ctl->hinted = false; |
4498 | /* If the block group is read-only, we can skip it entirely. */ |
4499 | if (unlikely(block_group->ro)) { |
4500 | if (ffe_ctl->for_treelog) |
				btrfs_clear_treelog_bg(block_group);
4502 | if (ffe_ctl->for_data_reloc) |
				btrfs_clear_data_reloc_bg(block_group);
4504 | continue; |
4505 | } |
4506 | |
		btrfs_grab_block_group(block_group, ffe_ctl->delalloc);
4508 | ffe_ctl->search_start = block_group->start; |
4509 | |
4510 | /* |
4511 | * this can happen if we end up cycling through all the |
4512 | * raid types, but we want to make sure we only allocate |
4513 | * for the proper type. |
4514 | */ |
		if (!block_group_bits(block_group, ffe_ctl->flags)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
4517 | BTRFS_BLOCK_GROUP_RAID1_MASK | |
4518 | BTRFS_BLOCK_GROUP_RAID56_MASK | |
4519 | BTRFS_BLOCK_GROUP_RAID10; |
4520 | |
4521 | /* |
4522 | * if they asked for extra copies and this block group |
4523 | * doesn't provide them, bail. This does allow us to |
4524 | * fill raid0 from raid1. |
4525 | */ |
4526 | if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) |
4527 | goto loop; |
4528 | |
4529 | /* |
4530 | * This block group has different flags than we want. |
4531 | * It's possible that we have MIXED_GROUP flag but no |
4532 | * block group is mixed. Just skip such block group. |
4533 | */ |
			btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4535 | continue; |
4536 | } |
4537 | |
4538 | have_block_group: |
4539 | trace_find_free_extent_have_block_group(root, ffe_ctl, block_group); |
		ffe_ctl->cached = btrfs_block_group_done(block_group);
4541 | if (unlikely(!ffe_ctl->cached)) { |
4542 | ffe_ctl->have_caching_bg = true; |
			ret = btrfs_cache_block_group(block_group, false);
4544 | |
4545 | /* |
4546 | * If we get ENOMEM here or something else we want to |
4547 | * try other block groups, because it may not be fatal. |
4548 | * However if we can't find anything else we need to |
4549 | * save our return here so that we return the actual |
4550 | * error that caused problems, not ENOSPC. |
4551 | */ |
4552 | if (ret < 0) { |
4553 | if (!cache_block_group_error) |
4554 | cache_block_group_error = ret; |
4555 | ret = 0; |
4556 | goto loop; |
4557 | } |
4558 | ret = 0; |
4559 | } |
4560 | |
4561 | if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { |
4562 | if (!cache_block_group_error) |
4563 | cache_block_group_error = -EIO; |
4564 | goto loop; |
4565 | } |
4566 | |
		if (!find_free_extent_check_size_class(ffe_ctl, block_group))
4568 | goto loop; |
4569 | |
4570 | bg_ret = NULL; |
		ret = do_allocation(block_group, ffe_ctl, &bg_ret);
4572 | if (ret > 0) |
4573 | goto loop; |
4574 | |
4575 | if (bg_ret && bg_ret != block_group) { |
			btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4577 | block_group = bg_ret; |
4578 | } |
4579 | |
4580 | /* Checks */ |
4581 | ffe_ctl->search_start = round_up(ffe_ctl->found_offset, |
4582 | fs_info->stripesize); |
4583 | |
4584 | /* move on to the next group */ |
4585 | if (ffe_ctl->search_start + ffe_ctl->num_bytes > |
4586 | block_group->start + block_group->length) { |
			btrfs_add_free_space_unused(block_group,
						    ffe_ctl->found_offset,
						    ffe_ctl->num_bytes);
4590 | goto loop; |
4591 | } |
4592 | |
4593 | if (ffe_ctl->found_offset < ffe_ctl->search_start) |
			btrfs_add_free_space_unused(block_group,
					ffe_ctl->found_offset,
					ffe_ctl->search_start - ffe_ctl->found_offset);
4597 | |
		ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes,
					       ffe_ctl->num_bytes,
					       ffe_ctl->delalloc,
					       ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS);
4602 | if (ret == -EAGAIN) { |
			btrfs_add_free_space_unused(block_group,
						    ffe_ctl->found_offset,
						    ffe_ctl->num_bytes);
4606 | goto loop; |
4607 | } |
		btrfs_inc_block_group_reservations(block_group);
4609 | |
		/* we are all good, let's return */
4611 | ins->objectid = ffe_ctl->search_start; |
4612 | ins->offset = ffe_ctl->num_bytes; |
4613 | |
4614 | trace_btrfs_reserve_extent(block_group, ffe_ctl); |
		btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4616 | break; |
4617 | loop: |
4618 | if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && |
4619 | !ffe_ctl->retry_uncached) { |
4620 | ffe_ctl->retry_uncached = true; |
			btrfs_wait_block_group_cache_progress(block_group,
					ffe_ctl->num_bytes +
					ffe_ctl->empty_cluster +
					ffe_ctl->empty_size);
4625 | goto have_block_group; |
4626 | } |
		release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
4628 | cond_resched(); |
4629 | } |
4630 | up_read(sem: &space_info->groups_sem); |
4631 | |
4632 | ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); |
4633 | if (ret > 0) |
4634 | goto search; |
4635 | |
4636 | if (ret == -ENOSPC && !cache_block_group_error) { |
4637 | /* |
4638 | * Use ffe_ctl->total_free_space as fallback if we can't find |
4639 | * any contiguous hole. |
4640 | */ |
4641 | if (!ffe_ctl->max_extent_size) |
4642 | ffe_ctl->max_extent_size = ffe_ctl->total_free_space; |
4643 | spin_lock(lock: &space_info->lock); |
4644 | space_info->max_extent_size = ffe_ctl->max_extent_size; |
4645 | spin_unlock(lock: &space_info->lock); |
4646 | ins->offset = ffe_ctl->max_extent_size; |
4647 | } else if (ret == -ENOSPC) { |
4648 | ret = cache_block_group_error; |
4649 | } |
4650 | return ret; |
4651 | } |
4652 | |
4653 | /* |
4654 | * Entry point to the extent allocator. Tries to find a hole that is at least |
4655 | * as big as @num_bytes. |
4656 | * |
4657 | * @root - The root that will contain this extent |
4658 | * |
 * @ram_bytes        - The amount of space in RAM that @num_bytes takes. This
4660 | * is used for accounting purposes. This value differs |
4661 | * from @num_bytes only in the case of compressed extents. |
4662 | * |
4663 | * @num_bytes - Number of bytes to allocate on-disk. |
4664 | * |
4665 | * @min_alloc_size - Indicates the minimum amount of space that the |
4666 | * allocator should try to satisfy. In some cases |
4667 | * @num_bytes may be larger than what is required and if |
4668 | * the filesystem is fragmented then allocation fails. |
4669 | * However, the presence of @min_alloc_size gives a |
4670 | * chance to try and satisfy the smaller allocation. |
4671 | * |
4672 | * @empty_size - A hint that you plan on doing more COW. This is the |
4673 | * size in bytes the allocator should try to find free |
4674 | * next to the block it returns. This is just a hint and |
4675 | * may be ignored by the allocator. |
4676 | * |
4677 | * @hint_byte - Hint to the allocator to start searching above the byte |
4678 | * address passed. It might be ignored. |
4679 | * |
4680 | * @ins - This key is modified to record the found hole. It will |
4681 | * have the following values: |
4682 | * ins->objectid == start position |
4683 | * ins->flags = BTRFS_EXTENT_ITEM_KEY |
4684 | * ins->offset == the size of the hole. |
4685 | * |
4686 | * @is_data - Boolean flag indicating whether an extent is |
4687 | * allocated for data (true) or metadata (false) |
4688 | * |
4689 | * @delalloc - Boolean flag indicating whether this allocation is for |
4690 | * delalloc or not. If 'true' data_rwsem of block groups |
4691 | * is going to be acquired. |
4692 | * |
4693 | * |
4694 | * Returns 0 when an allocation succeeded or < 0 when an error occurred. In |
4695 | * case -ENOSPC is returned then @ins->offset will contain the size of the |
4696 | * largest available hole the allocator managed to find. |
4697 | */ |
4698 | int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, |
4699 | u64 num_bytes, u64 min_alloc_size, |
4700 | u64 empty_size, u64 hint_byte, |
4701 | struct btrfs_key *ins, int is_data, int delalloc) |
4702 | { |
4703 | struct btrfs_fs_info *fs_info = root->fs_info; |
4704 | struct find_free_extent_ctl ffe_ctl = {}; |
4705 | bool final_tried = num_bytes == min_alloc_size; |
4706 | u64 flags; |
4707 | int ret; |
4708 | bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); |
4709 | bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); |
4710 | |
	flags = get_alloc_profile_by_root(root, is_data);
4712 | again: |
4713 | WARN_ON(num_bytes < fs_info->sectorsize); |
4714 | |
4715 | ffe_ctl.ram_bytes = ram_bytes; |
4716 | ffe_ctl.num_bytes = num_bytes; |
4717 | ffe_ctl.min_alloc_size = min_alloc_size; |
4718 | ffe_ctl.empty_size = empty_size; |
4719 | ffe_ctl.flags = flags; |
4720 | ffe_ctl.delalloc = delalloc; |
4721 | ffe_ctl.hint_byte = hint_byte; |
4722 | ffe_ctl.for_treelog = for_treelog; |
4723 | ffe_ctl.for_data_reloc = for_data_reloc; |
4724 | |
	ret = find_free_extent(root, ins, &ffe_ctl);
4726 | if (!ret && !is_data) { |
		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
4728 | } else if (ret == -ENOSPC) { |
4729 | if (!final_tried && ins->offset) { |
4730 | num_bytes = min(num_bytes >> 1, ins->offset); |
4731 | num_bytes = round_down(num_bytes, |
4732 | fs_info->sectorsize); |
4733 | num_bytes = max(num_bytes, min_alloc_size); |
4734 | ram_bytes = num_bytes; |
4735 | if (num_bytes == min_alloc_size) |
4736 | final_tried = true; |
4737 | goto again; |
4738 | } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { |
4739 | struct btrfs_space_info *sinfo; |
4740 | |
4741 | sinfo = btrfs_find_space_info(info: fs_info, flags); |
4742 | btrfs_err(fs_info, |
4743 | "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d" , |
4744 | flags, num_bytes, for_treelog, for_data_reloc); |
4745 | if (sinfo) |
4746 | btrfs_dump_space_info(fs_info, info: sinfo, |
4747 | bytes: num_bytes, dump_block_groups: 1); |
4748 | } |
4749 | } |
4750 | |
4751 | return ret; |
4752 | } |
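
/*
 * Illustrative sketch (not a caller in this file): a hypothetical data
 * writeback caller asking for 1MiB but willing to accept as little as one
 * sector could do:
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(root, SZ_1M, SZ_1M, fs_info->sectorsize,
 *				   0, 0, &ins, 1, 1);
 *
 * On success ins.objectid is the logical start and ins.offset the length;
 * on -ENOSPC, ins.offset holds the largest hole the allocator found.
 */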

int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 len, int delalloc)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache) {
		btrfs_err(fs_info, "Unable to find block group for %llu",
			  start);
		return -ENOSPC;
	}

	btrfs_add_free_space(cache, start, len);
	btrfs_free_reserved_bytes(cache, len, delalloc);
	trace_btrfs_reserved_extent_free(fs_info, start, len);

	btrfs_put_block_group(cache);
	return 0;
}
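
/*
 * Note: the function above only returns the range to the in-memory free
 * space cache and releases the block group reservation; it does not touch
 * the extent tree. It is meant for backing out a btrfs_reserve_extent()
 * reservation, as btrfs_alloc_tree_block() does below on its error path.
 */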

int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
			      const struct extent_buffer *eb)
{
	struct btrfs_block_group *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
	if (!cache) {
		btrfs_err(trans->fs_info, "unable to find block group for %llu",
			  eb->start);
		return -ENOSPC;
	}

	ret = pin_down_extent(trans, cache, eb->start, eb->len, 1);
	btrfs_put_block_group(cache);
	return ret;
}

static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				 u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ret = remove_from_free_space_tree(trans, bytenr, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_update_block_group(trans, bytenr, num_bytes, true);
	if (ret) {
		ASSERT(!ret);
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  bytenr, num_bytes);
		return ret;
	}

	trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes);
	return 0;
}

static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod, u64 oref_root)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root;
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_owner_ref *oref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;
	const bool simple_quota = (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE);

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item);
	if (simple_quota)
		size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
	size += btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, ins->objectid);
	ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	if (simple_quota) {
		btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_EXTENT_OWNER_REF_KEY);
		oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
		btrfs_set_extent_owner_ref_root_id(leaf, oref, oref_root);
		iref = (struct btrfs_extent_inline_ref *)(oref + 1);
	}
	btrfs_set_extent_inline_ref_type(leaf, iref, type);

	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
	btrfs_free_path(path);

	return alloc_reserved_extent(trans, ins->objectid, ins->offset);
}
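
/*
 * The item written above ends up laid out as follows (sketch; the owner
 * ref is only present when simple quotas are enabled):
 *
 *	[ btrfs_extent_item | (btrfs_extent_owner_ref) | inline data ref ]
 *
 * where the inline ref is either a shared data ref keyed on @parent or an
 * extent data ref keyed on root/owner/offset.
 */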

static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root;
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_key extent_key;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_delayed_tree_ref *ref;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 flags = extent_op->flags_to_set;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);

	extent_key.objectid = node->bytenr;
	if (skinny_metadata) {
		extent_key.offset = ref->level;
		extent_key.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		extent_key.offset = node->num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		size += sizeof(*block_info);
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, extent_key.objectid);
	ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key,
				      size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
		btrfs_set_tree_block_level(leaf, block_info, ref->level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
	}

	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_free_path(path);

	return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
}
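
/*
 * For reference, the two key shapes written above (sketch): with the
 * SKINNY_METADATA incompat flag the extent item is keyed as
 * (bytenr, BTRFS_METADATA_ITEM_KEY, level) and carries no
 * btrfs_tree_block_info, while the old format is keyed as
 * (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes) and is followed by a
 * btrfs_tree_block_info holding the first key and the level.
 */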

int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 owner,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins)
{
	struct btrfs_ref generic_ref = { 0 };
	u64 root_objectid = root->root_key.objectid;
	u64 owning_root = root_objectid;

	ASSERT(root_objectid != BTRFS_TREE_LOG_OBJECTID);

	if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root))
		owning_root = root->relocation_src_root;

	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
			       ins->objectid, ins->offset, 0, owning_root);
	btrfs_init_data_ref(&generic_ref, root_objectid, owner,
			    offset, 0, false);
	btrfs_ref_tree_mod(root->fs_info, &generic_ref);

	return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
}

/*
 * This is used by the tree logging recovery code. It records that an extent
 * has been allocated and makes sure to clear the free space cache bits as
 * well.
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_squota_delta delta = {
		.root = root_objectid,
		.num_bytes = ins->offset,
		.generation = trans->transid,
		.is_data = true,
		.is_inc = true,
	};

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(fs_info, ins->objectid,
					      ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	space_info->bytes_reserved += ins->offset;
	block_group->reserved += ins->offset;
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
					 offset, ins, 1, root_objectid);
	if (ret)
		btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
	ret = btrfs_record_squota_delta(fs_info, &delta);
	btrfs_put_block_group(block_group);
	return ret;
}
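
/*
 * Note on the open coded reservation bump above: during log replay the
 * extent was never reserved through btrfs_reserve_extent(), so
 * bytes_reserved/reserved are raised by hand to balance the books when
 * alloc_reserved_file_extent() updates the block group accounting.
 */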

#ifdef CONFIG_BTRFS_DEBUG
/*
 * Extra safety check in case the extent tree is corrupted and the extent
 * allocator chooses to use a tree block which is already used and locked.
 */
static bool check_eb_lock_owner(const struct extent_buffer *eb)
{
	if (eb->lock_owner == current->pid) {
		btrfs_err_rl(eb->fs_info,
"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
			     eb->start, btrfs_header_owner(eb), current->pid);
		return true;
	}
	return false;
}
#else
static bool check_eb_lock_owner(const struct extent_buffer *eb)
{
	return false;
}
#endif

static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, int level, u64 owner,
		      enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *buf;
	u64 lockdep_owner = owner;

	buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
	if (IS_ERR(buf))
		return buf;

	if (check_eb_lock_owner(buf)) {
		free_extent_buffer(buf);
		return ERR_PTR(-EUCLEAN);
	}

	/*
	 * The reloc trees are just snapshots, so we need them to appear to be
	 * just like any other fs tree WRT lockdep.
	 *
	 * The exception however is in replace_path() in relocation, where we
	 * hold the lock on the original fs root and then search for the reloc
	 * root. At that point we need to make sure any reloc root buffers are
	 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
	 * lockdep happy.
	 */
	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
	    !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
		lockdep_owner = BTRFS_FS_TREE_OBJECTID;

	/* btrfs_clear_buffer_dirty() accesses generation field. */
	btrfs_set_header_generation(buf, trans->transid);

	/*
	 * This needs to stay, because we could allocate a freed block from an
	 * old tree into a new tree, so we need to make sure this new block is
	 * set to the appropriate level and owner.
	 */
	btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);

	__btrfs_tree_lock(buf, nest);
	btrfs_clear_buffer_dirty(trans, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
	clear_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &buf->bflags);

	set_extent_buffer_uptodate(buf);

	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(buf, level);
	btrfs_set_header_bytenr(buf, buf->start);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(buf, owner);
	write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		buf->log_index = root->log_transid % 2;
		/*
		 * We allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (buf->log_index == 0)
			set_extent_bit(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1,
				       EXTENT_DIRTY, NULL);
		else
			set_extent_bit(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1,
				       EXTENT_NEW, NULL);
	} else {
		buf->log_index = -1;
		set_extent_bit(&trans->transaction->dirty_pages, buf->start,
			       buf->start + buf->len - 1, EXTENT_DIRTY, NULL);
	}
	/* this returns a buffer locked for blocking */
	return buf;
}

/*
 * Finds a free extent and does all the dirty work required for allocation.
 * Returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 parent, u64 root_objectid,
					     const struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size,
					     u64 reloc_src_root,
					     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_ref generic_ref = { 0 };
	u64 flags = 0;
	int ret;
	u32 blocksize = fs_info->nodesize;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	u64 owning_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (btrfs_is_testing(fs_info)) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    level, root_objectid, nest);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}
#endif

	block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret)
		goto out_unuse;

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
				    root_objectid, nest);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;
	}
	owning_root = btrfs_header_owner(buf);

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		owning_root = reloc_src_root;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		extent_op = btrfs_alloc_delayed_extent_op();
		if (!extent_op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = !skinny_metadata;
		extent_op->update_flags = true;
		extent_op->level = level;

		btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
				       ins.objectid, ins.offset, parent, owning_root);
		btrfs_init_tree_ref(&generic_ref, level, root_objectid,
				    root->root_key.objectid, false);
		btrfs_ref_tree_mod(fs_info, &generic_ref);
		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
		if (ret)
			goto out_free_delayed;
	}
	return buf;

out_free_delayed:
	btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
	btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
	btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
	return ERR_PTR(ret);
}
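
/*
 * Illustrative sketch (assumed caller shape, modeled on the COW path in
 * ctree.c): replacing a block at @level with a fresh copy might look like:
 *
 *	cow = btrfs_alloc_tree_block(trans, root, parent_start,
 *				     root->root_key.objectid, &disk_key,
 *				     level, buf->start, 0, 0,
 *				     BTRFS_NESTING_COW);
 *	if (IS_ERR(cow))
 *		return PTR_ERR(cow);
 */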

struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	struct btrfs_key drop_progress;
	int drop_level;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int restarted;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
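
/*
 * Rough sketch of the stage machine driven by the walkers below:
 * DROP_REFERENCE walks the tree dropping references. When do_walk_down()
 * hits a shared block whose backrefs still need converting, it switches
 * the stage to UPDATE_BACKREF for the subtree rooted at wc->shared_level,
 * and walk_up_proc() switches back to DROP_REFERENCE once that subtree
 * has been processed.
 */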

static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags, NULL);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		btrfs_readahead_node_child(eb, slot);
		nread++;
	}
	wc->reada_slot = slot;
}

/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * When the reference count of a tree block is 1, it won't increase
	 * again. Once the full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, fs_info,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level],
					       NULL);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, eb, flag);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * The block is shared by multiple trees, so it's not good to
	 * keep the tree lock.
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * This is used to verify a ref exists for this root to deal with a bug where we
 * would have a drop_progress key that hadn't been updated properly.
 */
static int check_ref_exists(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr, u64 parent,
			    int level)
{
	struct btrfs_path *path;
	struct btrfs_extent_inline_ref *iref;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = lookup_extent_backref(trans, path, &iref, bytenr,
				    root->fs_info->nodesize, parent,
				    root->root_key.objectid, level, 0);
	btrfs_free_path(path);
	if (ret == -ENOENT)
		return 0;
	if (ret < 0)
		return ret;
	return 1;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 parent;
	u64 owner_root = 0;
	struct btrfs_tree_parent_check check = { 0 };
	struct btrfs_key key;
	struct btrfs_ref ref = { 0 };
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);

	check.level = level - 1;
	check.transid = generation;
	check.owner_root = root->root_key.objectid;
	check.has_first_key = true;
	btrfs_node_key_to_cpu(path->nodes[level], &check.first_key,
			      path->slots[level]);

	next = find_extent_buffer(fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(fs_info, bytenr,
						    root->root_key.objectid, level - 1);
		if (IS_ERR(next))
			return PTR_ERR(next);
		reada = 1;
	}
	btrfs_tree_lock(next);

	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1],
				       &owner_root);
	if (ret < 0)
		goto out_unlock;

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(fs_info, "Missing references.");
		ret = -EIO;
		goto out_unlock;
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(fs_info, bytenr, &check);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
	}

	level--;
	ASSERT(level == btrfs_header_level(next));
	if (level != btrfs_header_level(next)) {
		btrfs_err(root->fs_info, "mismatched level");
		ret = -EIO;
		goto out_unlock;
	}
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
			if (root->root_key.objectid !=
			    btrfs_header_owner(path->nodes[level])) {
				btrfs_err(root->fs_info,
					  "mismatched block owner");
				ret = -EIO;
				goto out_unlock;
			}
			parent = 0;
		}

		/*
		 * If we had a drop_progress we need to verify the refs are set
		 * as expected. If we find our ref then we know that from here
		 * on out everything should be correct, and we can clear the
		 * ->restarted flag.
		 */
		if (wc->restarted) {
			ret = check_ref_exists(trans, root, bytenr, parent,
					       level - 1);
			if (ret < 0)
				goto out_unlock;
			if (ret == 0)
				goto no_delete;
			ret = 0;
			wc->restarted = 0;
		}

		/*
		 * Reloc tree doesn't contribute to qgroup numbers, and we have
		 * already accounted them at merge time (replace_path),
		 * thus we could skip expensive subtree trace here.
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
		    need_account) {
			ret = btrfs_qgroup_trace_subtree(trans, next,
							 generation, level - 1);
			if (ret) {
				btrfs_err_rl(fs_info,
"Error %d accounting shared subtree. Quota is out of sync, rescan required.",
					     ret);
			}
		}

		/*
		 * We need to update the next key in our walk control so we can
		 * update the drop_progress key accordingly. We don't care if
		 * find_next_key doesn't find a key because that means we're at
		 * the end and are going to clean up now.
		 */
		wc->drop_level = level;
		find_next_key(path, level, &wc->drop_progress);

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       fs_info->nodesize, parent, owner_root);
		btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid,
				    0, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret)
			goto out_unlock;
	}
no_delete:
	*lookup_info = 1;
	ret = 1;

out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);

	return ret;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			path->locks[level] = BTRFS_WRITE_LOCK;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level],
						       NULL);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			if (is_fstree(root->root_key.objectid)) {
				ret = btrfs_qgroup_trace_leaf_items(trans, eb);
				if (ret) {
					btrfs_err_rl(fs_info,
"error %d accounting leaf items, quota is out of sync, rescan required",
						     ret);
				}
			}
		}
		/* Make block locked assertion in btrfs_clear_buffer_dirty happy. */
		if (!path->locks[level]) {
			btrfs_tree_lock(eb);
			path->locks[level] = BTRFS_WRITE_LOCK;
		}
		btrfs_clear_buffer_dirty(trans, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else if (root->root_key.objectid != btrfs_header_owner(eb))
			goto owner_mismatch;
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else if (root->root_key.objectid !=
			 btrfs_header_owner(path->nodes[level + 1]))
			goto owner_mismatch;
	}

	btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
			      wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;

owner_mismatch:
	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
		     btrfs_header_owner(eb), root->root_key.objectid);
	return -EUCLEAN;
}

static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret = 0;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			break;
		level = wc->level;
	}
	return (ret == 1) ? 0 : ret;
}

static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;
			if (ret < 0)
				return ret;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function also
 * makes sure backrefs for the shared block and all lower level blocks
 * are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
{
	const bool is_reloc_root = (root->root_key.objectid ==
				    BTRFS_TREE_RELOC_OBJECTID);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;
	bool unfinished_drop = false;

	btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Use join to avoid potential EINTR from transaction start. See
	 * wait_reserve_ticket and the whole reservation callchain.
	 */
	if (for_reloc)
		trans = btrfs_join_transaction(tree_root);
	else
		trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	err = btrfs_run_delayed_items(trans);
	if (err)
		goto out_end_trans;

	/*
	 * This will help us catch people modifying the fs tree while we're
	 * dropping it. It is unsafe to mess with the fs tree while it's being
	 * dropped as we unlock the root node and parent nodes as we walk down
	 * the tree, assuming nothing will change. If something does change
	 * then we'll have stale information and drop references to blocks we've
	 * already dropped.
	 */
	set_bit(BTRFS_ROOT_DELETING, &root->state);
	unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       path->nodes[level]->start,
						       level, 1, &wc->refs[level],
						       &wc->flags[level], NULL);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == btrfs_root_drop_level(root_item))
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			wc->drop_level = wc->level;
			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
					      &wc->drop_progress,
					      path->slots[wc->drop_level]);
		}
		btrfs_cpu_key_to_disk(&root_item->drop_progress,
				      &wc->drop_progress);
		btrfs_set_root_drop_level(root_item, wc->drop_level);

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			if (!is_reloc_root)
				btrfs_set_last_root_drop_gen(fs_info, trans->transid);

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			/*
			 * Use join to avoid potential EINTR from transaction
			 * start. See wait_reserve_ticket and the whole
			 * reservation callchain.
			 */
			if (for_reloc)
				trans = btrfs_join_transaction(tree_root);
			else
				trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (!is_reloc_root) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/*
			 * If we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	/*
	 * This subvolume is going to be completely dropped, and won't be
	 * recorded as dirty roots, thus pertrans meta rsv will not be freed at
	 * commit transaction time. So free it here manually.
	 */
	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
	btrfs_qgroup_free_meta_all_pertrans(root);

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		btrfs_add_dropped_root(trans, root);
	else
		btrfs_put_root(root);
	root_dropped = true;
out_end_trans:
	if (!is_reloc_root)
		btrfs_set_last_root_drop_gen(fs_info, trans->transid);

	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * We were an unfinished drop root, check to see if there are any
	 * pending, and if not clear and wake up any waiters.
	 */
	if (!err && unfinished_drop)
		btrfs_maybe_wake_unfinished_drop(fs_info);

	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later. This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	return err;
}
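
/*
 * Illustrative sketch (assumed caller shape, modeled on the cleaner
 * thread): dead roots are dropped with update_ref and for_reloc both 0,
 * and an -EAGAIN return simply leaves the root on the dead roots list to
 * be retried later:
 *
 *	ret = btrfs_drop_snapshot(root, 0, 0);
 */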

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * It is only used by relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_write_locked(parent);
	parent_level = btrfs_header_level(parent);
	atomic_inc(&parent->refs);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_write_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

/*
 * Unpin the extent range in an error context and don't add the space back.
 * Errors are not propagated further.
 */
void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end)
{
	unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space. Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time. We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses. For that, we need to take a reference to the
 * transaction and hold the commit root sem. We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!bdev_max_discard_sectors(device->bdev))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Check if there are any CHUNK_* bits left */
		if (start > device->total_bytes) {
			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
			btrfs_warn_in_rcu(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
					  start, end - start + 1,
					  btrfs_dev_name(device),
					  device->total_bytes);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		/* Ensure we skip the reserved space on each device. */
		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);

		/*
		 * If find_first_clear_extent_bit finds a range that spans the
		 * end of the device it will set end to -1, in this case it's up
		 * to the caller to trim the value to the size of the device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bit(&device->alloc_state, start,
				       start + bytes - 1, CHUNK_TRIMMED, NULL);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

/*
 * Trim the whole filesystem by:
 * 1) trimming the free space in each block group
 * 2) trimming the unallocated space on each device
 *
 * This will also continue trimming even if a block group or device encounters
 * an error. The return value will be the last error, or 0 if nothing bad
 * happens.
 */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *cache = NULL;
	struct btrfs_device *device;
	u64 group_trimmed;
	u64 range_end = U64_MAX;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 bg_failed = 0;
	u64 dev_failed = 0;
	int bg_ret = 0;
	int dev_ret = 0;
	int ret = 0;

	if (range->start == U64_MAX)
		return -EINVAL;

	/*
	 * Check range overflow if range->len is set.
	 * The default range->len is U64_MAX.
	 */
	if (range->len != U64_MAX &&
	    check_add_overflow(range->start, range->len, &range_end))
		return -EINVAL;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		if (cache->start >= range_end) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->start);
		end = min(range_end, cache->start + cache->length);

		if (end - start >= range->minlen) {
			if (!btrfs_block_group_done(cache)) {
				ret = btrfs_cache_block_group(cache, true);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				bg_failed++;
				bg_ret = ret;
				continue;
			}
		}
	}

	if (bg_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu block group(s), last error %d",
			   bg_failed, bg_ret);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
			continue;

		ret = btrfs_trim_free_extents(device, &group_trimmed);
		if (ret) {
			dev_failed++;
			dev_ret = ret;
			break;
		}

		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (dev_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu device(s), last error %d",
			   dev_failed, dev_ret);
	range->len = trimmed;
	if (bg_ret)
		return bg_ret;
	return dev_ret;
}

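/*
 * Illustrative sketch (assumed caller shape, modeled on the FITRIM ioctl
 * path): trimming the whole filesystem with a 64KiB granularity could look
 * like:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = U64_MAX,
 *		.minlen = SZ_64K,
 *	};
 *
 *	ret = btrfs_trim_fs(fs_info, &range);
 *
 * On return range.len holds the number of bytes actually trimmed.
 */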