1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2007 Oracle. All rights reserved. |
4 | */ |
5 | |
6 | #include <linux/fs.h> |
7 | #include <linux/blkdev.h> |
8 | #include <linux/radix-tree.h> |
9 | #include <linux/writeback.h> |
10 | #include <linux/workqueue.h> |
11 | #include <linux/kthread.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/migrate.h> |
14 | #include <linux/ratelimit.h> |
15 | #include <linux/uuid.h> |
16 | #include <linux/semaphore.h> |
17 | #include <linux/error-injection.h> |
18 | #include <linux/crc32c.h> |
19 | #include <linux/sched/mm.h> |
20 | #include <asm/unaligned.h> |
21 | #include <crypto/hash.h> |
22 | #include "ctree.h" |
23 | #include "disk-io.h" |
24 | #include "transaction.h" |
25 | #include "btrfs_inode.h" |
26 | #include "bio.h" |
27 | #include "print-tree.h" |
28 | #include "locking.h" |
29 | #include "tree-log.h" |
30 | #include "free-space-cache.h" |
31 | #include "free-space-tree.h" |
32 | #include "dev-replace.h" |
33 | #include "raid56.h" |
34 | #include "sysfs.h" |
35 | #include "qgroup.h" |
36 | #include "compression.h" |
37 | #include "tree-checker.h" |
38 | #include "ref-verify.h" |
39 | #include "block-group.h" |
40 | #include "discard.h" |
41 | #include "space-info.h" |
42 | #include "zoned.h" |
43 | #include "subpage.h" |
44 | #include "fs.h" |
45 | #include "accessors.h" |
46 | #include "extent-tree.h" |
47 | #include "root-tree.h" |
48 | #include "defrag.h" |
49 | #include "uuid-tree.h" |
50 | #include "relocation.h" |
51 | #include "scrub.h" |
52 | #include "super.h" |
53 | |
54 | #define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\ |
55 | BTRFS_HEADER_FLAG_RELOC |\ |
56 | BTRFS_SUPER_FLAG_ERROR |\ |
57 | BTRFS_SUPER_FLAG_SEEDING |\ |
58 | BTRFS_SUPER_FLAG_METADUMP |\ |
59 | BTRFS_SUPER_FLAG_METADUMP_V2) |
60 | |
61 | static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info); |
62 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info); |
63 | |
64 | static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info) |
65 | { |
66 | if (fs_info->csum_shash) |
67 | crypto_free_shash(fs_info->csum_shash); |
68 | } |
69 | |
70 | /* |
71 | * Compute the csum of a btree block and store the result to provided buffer. |
72 | */ |
73 | static void csum_tree_block(struct extent_buffer *buf, u8 *result) |
74 | { |
75 | struct btrfs_fs_info *fs_info = buf->fs_info; |
76 | int num_pages; |
77 | u32 first_page_part; |
78 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
79 | char *kaddr; |
80 | int i; |
81 | |
82 | shash->tfm = fs_info->csum_shash; |
83 | crypto_shash_init(shash); |
84 | |
85 | if (buf->addr) { |
86 | /* Pages are contiguous, handle them as a big one. */ |
87 | kaddr = buf->addr; |
88 | first_page_part = fs_info->nodesize; |
89 | num_pages = 1; |
90 | } else { |
91 | kaddr = folio_address(buf->folios[0]); |
92 | first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize); |
93 | num_pages = num_extent_pages(buf); |
94 | } |
95 | |
96 | crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, |
97 | first_page_part - BTRFS_CSUM_SIZE); |
98 | |
99 | /* |
100 | * Only the multiple single-page folios case reaches the loop below. |
101 | * |
102 | * The nodesize <= PAGE_SIZE and large folio cases were already fully |
103 | * handled by the crypto_shash_update() call above. |
104 | */ |
105 | for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) { |
106 | kaddr = folio_address(buf->folios[i]); |
107 | crypto_shash_update(shash, kaddr, PAGE_SIZE); |
108 | } |
109 | memset(result, 0, BTRFS_CSUM_SIZE); |
110 | crypto_shash_final(shash, result); |
111 | } |
112 | |
113 | /* |
114 | * we can't consider a given block up to date unless the transid of the |
115 | * block matches the transid in the parent node's pointer. This is how we |
116 | * detect blocks that either didn't get written at all or got written |
117 | * in the wrong place. |
118 | */ |
119 | int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic) |
120 | { |
121 | if (!extent_buffer_uptodate(eb)) |
122 | return 0; |
123 | |
124 | if (!parent_transid || btrfs_header_generation(eb) == parent_transid) |
125 | return 1; |
126 | |
127 | if (atomic) |
128 | return -EAGAIN; |
129 | |
130 | if (!extent_buffer_uptodate(eb) || |
131 | btrfs_header_generation(eb) != parent_transid) { |
132 | btrfs_err_rl(eb->fs_info, |
133 | "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu" , |
134 | eb->start, eb->read_mirror, |
135 | parent_transid, btrfs_header_generation(eb)); |
136 | clear_extent_buffer_uptodate(eb); |
137 | return 0; |
138 | } |
139 | return 1; |
140 | } |
141 | |
142 | static bool btrfs_supported_super_csum(u16 csum_type) |
143 | { |
144 | switch (csum_type) { |
145 | case BTRFS_CSUM_TYPE_CRC32: |
146 | case BTRFS_CSUM_TYPE_XXHASH: |
147 | case BTRFS_CSUM_TYPE_SHA256: |
148 | case BTRFS_CSUM_TYPE_BLAKE2: |
149 | return true; |
150 | default: |
151 | return false; |
152 | } |
153 | } |
154 | |
155 | /* |
156 | * Return 0 if the superblock checksum type matches the checksum value of that |
157 | * algorithm. Pass the raw disk superblock data. |
158 | */ |
159 | int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, |
160 | const struct btrfs_super_block *disk_sb) |
161 | { |
162 | char result[BTRFS_CSUM_SIZE]; |
163 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
164 | |
165 | shash->tfm = fs_info->csum_shash; |
166 | |
167 | /* |
168 | * The super_block structure does not span the whole |
169 | * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is |
170 | * filled with zeros and is included in the checksum. |
171 | */ |
172 | crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE, |
173 | BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result); |
174 | |
175 | if (memcmp(disk_sb->csum, result, fs_info->csum_size)) |
176 | return 1; |
177 | |
178 | return 0; |
179 | } |
180 | |
181 | static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, |
182 | int mirror_num) |
183 | { |
184 | struct btrfs_fs_info *fs_info = eb->fs_info; |
185 | int num_folios = num_extent_folios(eb); |
186 | int ret = 0; |
187 | |
188 | if (sb_rdonly(fs_info->sb)) |
189 | return -EROFS; |
190 | |
191 | for (int i = 0; i < num_folios; i++) { |
192 | struct folio *folio = eb->folios[i]; |
193 | u64 start = max_t(u64, eb->start, folio_pos(folio)); |
194 | u64 end = min_t(u64, eb->start + eb->len, |
195 | folio_pos(folio) + eb->folio_size); |
196 | u32 len = end - start; |
197 | |
198 | ret = btrfs_repair_io_failure(fs_info, 0, start, len, |
199 | start, folio, offset_in_folio(folio, start), |
200 | mirror_num); |
201 | if (ret) |
202 | break; |
203 | } |
204 | |
205 | return ret; |
206 | } |
207 | |
208 | /* |
209 | * helper to read a given tree block, doing retries as required when |
210 | * the checksums don't match and we have alternate mirrors to try. |
211 | * |
212 | * @check: expected tree parentness check, see the comments of the |
213 | * structure for details. |
214 | */ |
215 | int btrfs_read_extent_buffer(struct extent_buffer *eb, |
216 | struct btrfs_tree_parent_check *check) |
217 | { |
218 | struct btrfs_fs_info *fs_info = eb->fs_info; |
219 | int failed = 0; |
220 | int ret; |
221 | int num_copies = 0; |
222 | int mirror_num = 0; |
223 | int failed_mirror = 0; |
224 | |
225 | ASSERT(check); |
226 | |
227 | while (1) { |
228 | clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); |
229 | ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check); |
230 | if (!ret) |
231 | break; |
232 | |
233 | num_copies = btrfs_num_copies(fs_info, |
234 | eb->start, eb->len); |
235 | if (num_copies == 1) |
236 | break; |
237 | |
238 | if (!failed_mirror) { |
239 | failed = 1; |
240 | failed_mirror = eb->read_mirror; |
241 | } |
242 | |
243 | mirror_num++; |
244 | if (mirror_num == failed_mirror) |
245 | mirror_num++; |
246 | |
247 | if (mirror_num > num_copies) |
248 | break; |
249 | } |
250 | |
251 | if (failed && !ret && failed_mirror) |
252 | btrfs_repair_eb_io_failure(eb, failed_mirror); |
253 | |
254 | return ret; |
255 | } |
256 | |
257 | /* |
258 | * Checksum a dirty tree block before IO. |
259 | */ |
260 | blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio) |
261 | { |
262 | struct extent_buffer *eb = bbio->private; |
263 | struct btrfs_fs_info *fs_info = eb->fs_info; |
264 | u64 found_start = btrfs_header_bytenr(eb); |
265 | u64 last_trans; |
266 | u8 result[BTRFS_CSUM_SIZE]; |
267 | int ret; |
268 | |
269 | /* Btree blocks are always contiguous on disk. */ |
270 | if (WARN_ON_ONCE(bbio->file_offset != eb->start)) |
271 | return BLK_STS_IOERR; |
272 | if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len)) |
273 | return BLK_STS_IOERR; |
274 | |
275 | /* |
276 | * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't |
277 | * checksum it but zero-out its content. This is done to preserve |
278 | * ordering of I/O without unnecessarily writing out data. |
279 | */ |
280 | if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) { |
281 | memzero_extent_buffer(eb, 0, eb->len); |
282 | return BLK_STS_OK; |
283 | } |
284 | |
285 | if (WARN_ON_ONCE(found_start != eb->start)) |
286 | return BLK_STS_IOERR; |
287 | if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0], |
288 | eb->start, eb->len))) |
289 | return BLK_STS_IOERR; |
290 | |
291 | ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid, |
292 | offsetof(struct btrfs_header, fsid), |
293 | BTRFS_FSID_SIZE) == 0); |
294 | csum_tree_block(eb, result); |
295 | |
296 | if (btrfs_header_level(eb)) |
297 | ret = btrfs_check_node(eb); |
298 | else |
299 | ret = btrfs_check_leaf(eb); |
300 | |
301 | if (ret < 0) |
302 | goto error; |
303 | |
304 | /* |
305 | * Also check the generation: any eb that reaches here must be newer |
306 | * than the last committed transaction, or something is seriously wrong. |
307 | */ |
308 | last_trans = btrfs_get_last_trans_committed(fs_info); |
309 | if (unlikely(btrfs_header_generation(eb) <= last_trans)) { |
310 | ret = -EUCLEAN; |
311 | btrfs_err(fs_info, |
312 | "block=%llu bad generation, have %llu expect > %llu", |
313 | eb->start, btrfs_header_generation(eb), last_trans); |
314 | goto error; |
315 | } |
316 | write_extent_buffer(eb, result, 0, fs_info->csum_size); |
317 | return BLK_STS_OK; |
318 | |
319 | error: |
320 | btrfs_print_tree(eb, 0); |
321 | btrfs_err(fs_info, "block=%llu write time tree block corruption detected", |
322 | eb->start); |
323 | /* |
324 | * Be noisy if this is an extent buffer from a log tree. We don't abort |
325 | * a transaction in case there's a bad log tree extent buffer, we just |
326 | * fall back to a transaction commit. Still we want to know when there |
327 | * is a bad log tree extent buffer, as that may signal a bug somewhere. |
328 | */ |
329 | WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) || |
330 | btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID); |
331 | return errno_to_blk_status(ret); |
332 | } |
333 | |
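/*
 * Return true if the fsid in the given extent buffer's header matches neither
 * the filesystem's metadata_uuid nor the fsid of any attached seed device,
 * i.e. the block does not belong to this filesystem.
 */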
334 | static bool check_tree_block_fsid(struct extent_buffer *eb) |
335 | { |
336 | struct btrfs_fs_info *fs_info = eb->fs_info; |
337 | struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; |
338 | u8 fsid[BTRFS_FSID_SIZE]; |
339 | |
340 | read_extent_buffer(eb, dst: fsid, offsetof(struct btrfs_header, fsid), |
341 | BTRFS_FSID_SIZE); |
342 | |
343 | /* |
344 | * alloc_fs_devices() copies the fsid into fs_devices::metadata_uuid. |
345 | * This is then overwritten by the metadata_uuid if one is present in |
346 | * device_list_add(). The same is true for a seed device. So the use of |
347 | * fs_devices::metadata_uuid is appropriate here. |
348 | */ |
349 | if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0) |
350 | return false; |
351 | |
352 | list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) |
353 | if (!memcmp(p: fsid, q: seed_devs->fsid, BTRFS_FSID_SIZE)) |
354 | return false; |
355 | |
356 | return true; |
357 | } |
358 | |
359 | /* Do basic extent buffer checks at read time */ |
360 | int btrfs_validate_extent_buffer(struct extent_buffer *eb, |
361 | struct btrfs_tree_parent_check *check) |
362 | { |
363 | struct btrfs_fs_info *fs_info = eb->fs_info; |
364 | u64 found_start; |
365 | const u32 csum_size = fs_info->csum_size; |
366 | u8 found_level; |
367 | u8 result[BTRFS_CSUM_SIZE]; |
368 | const u8 *header_csum; |
369 | int ret = 0; |
370 | |
371 | ASSERT(check); |
372 | |
373 | found_start = btrfs_header_bytenr(eb); |
374 | if (found_start != eb->start) { |
375 | btrfs_err_rl(fs_info, |
376 | "bad tree block start, mirror %u want %llu have %llu" , |
377 | eb->read_mirror, eb->start, found_start); |
378 | ret = -EIO; |
379 | goto out; |
380 | } |
381 | if (check_tree_block_fsid(eb)) { |
382 | btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u" , |
383 | eb->start, eb->read_mirror); |
384 | ret = -EIO; |
385 | goto out; |
386 | } |
387 | found_level = btrfs_header_level(eb); |
388 | if (found_level >= BTRFS_MAX_LEVEL) { |
389 | btrfs_err(fs_info, |
390 | "bad tree block level, mirror %u level %d on logical %llu" , |
391 | eb->read_mirror, btrfs_header_level(eb), eb->start); |
392 | ret = -EIO; |
393 | goto out; |
394 | } |
395 | |
396 | csum_tree_block(eb, result); |
397 | header_csum = folio_address(eb->folios[0]) + |
398 | get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum)); |
399 | |
400 | if (memcmp(result, header_csum, csum_size) != 0) { |
401 | btrfs_warn_rl(fs_info, |
402 | "checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d" , |
403 | eb->start, eb->read_mirror, |
404 | CSUM_FMT_VALUE(csum_size, header_csum), |
405 | CSUM_FMT_VALUE(csum_size, result), |
406 | btrfs_header_level(eb)); |
407 | ret = -EUCLEAN; |
408 | goto out; |
409 | } |
410 | |
411 | if (found_level != check->level) { |
412 | btrfs_err(fs_info, |
413 | "level verify failed on logical %llu mirror %u wanted %u found %u" , |
414 | eb->start, eb->read_mirror, check->level, found_level); |
415 | ret = -EIO; |
416 | goto out; |
417 | } |
418 | if (unlikely(check->transid && |
419 | btrfs_header_generation(eb) != check->transid)) { |
420 | btrfs_err_rl(eb->fs_info, |
421 | "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu" , |
422 | eb->start, eb->read_mirror, check->transid, |
423 | btrfs_header_generation(eb)); |
424 | ret = -EIO; |
425 | goto out; |
426 | } |
427 | if (check->has_first_key) { |
428 | struct btrfs_key *expect_key = &check->first_key; |
429 | struct btrfs_key found_key; |
430 | |
431 | if (found_level) |
432 | btrfs_node_key_to_cpu(eb, cpu_key: &found_key, nr: 0); |
433 | else |
434 | btrfs_item_key_to_cpu(eb, cpu_key: &found_key, nr: 0); |
435 | if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) { |
436 | btrfs_err(fs_info, |
437 | "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)" , |
438 | eb->start, check->transid, |
439 | expect_key->objectid, |
440 | expect_key->type, expect_key->offset, |
441 | found_key.objectid, found_key.type, |
442 | found_key.offset); |
443 | ret = -EUCLEAN; |
444 | goto out; |
445 | } |
446 | } |
447 | if (check->owner_root) { |
448 | ret = btrfs_check_eb_owner(eb, root_owner: check->owner_root); |
449 | if (ret < 0) |
450 | goto out; |
451 | } |
452 | |
453 | /* |
454 | * If this is a leaf block and it is corrupt, set the corrupt bit so |
455 | * that we don't try and read the other copies of this block, just |
456 | * return -EIO. |
457 | */ |
458 | if (found_level == 0 && btrfs_check_leaf(eb)) { |
459 | set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); |
460 | ret = -EIO; |
461 | } |
462 | |
463 | if (found_level > 0 && btrfs_check_node(eb)) |
464 | ret = -EIO; |
465 | |
466 | if (ret) |
467 | btrfs_err(fs_info, |
468 | "read time tree block corruption detected on logical %llu mirror %u" , |
469 | eb->start, eb->read_mirror); |
470 | out: |
471 | return ret; |
472 | } |
473 | |
474 | #ifdef CONFIG_MIGRATION |
475 | static int btree_migrate_folio(struct address_space *mapping, |
476 | struct folio *dst, struct folio *src, enum migrate_mode mode) |
477 | { |
478 | /* |
479 | * we can't safely write a btree page from here, |
480 | * we haven't done the locking hook |
481 | */ |
482 | if (folio_test_dirty(folio: src)) |
483 | return -EAGAIN; |
484 | /* |
485 | * Buffers may be managed in a filesystem specific way. |
486 | * We must have no buffers or drop them. |
487 | */ |
488 | if (folio_get_private(folio: src) && |
489 | !filemap_release_folio(folio: src, GFP_KERNEL)) |
490 | return -EAGAIN; |
491 | return migrate_folio(mapping, dst, src, mode); |
492 | } |
493 | #else |
494 | #define btree_migrate_folio NULL |
495 | #endif |
496 | |
497 | static int btree_writepages(struct address_space *mapping, |
498 | struct writeback_control *wbc) |
499 | { |
500 | int ret; |
501 | |
502 | if (wbc->sync_mode == WB_SYNC_NONE) { |
503 | struct btrfs_fs_info *fs_info; |
504 | |
505 | if (wbc->for_kupdate) |
506 | return 0; |
507 | |
508 | fs_info = inode_to_fs_info(mapping->host); |
509 | /* this is a bit racy, but that's ok */ |
510 | ret = __percpu_counter_compare(fbc: &fs_info->dirty_metadata_bytes, |
511 | BTRFS_DIRTY_METADATA_THRESH, |
512 | batch: fs_info->dirty_metadata_batch); |
513 | if (ret < 0) |
514 | return 0; |
515 | } |
516 | return btree_write_cache_pages(mapping, wbc); |
517 | } |
518 | |
519 | static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags) |
520 | { |
521 | if (folio_test_writeback(folio) || folio_test_dirty(folio)) |
522 | return false; |
523 | |
524 | return try_release_extent_buffer(&folio->page); |
525 | } |
526 | |
527 | static void btree_invalidate_folio(struct folio *folio, size_t offset, |
528 | size_t length) |
529 | { |
530 | struct extent_io_tree *tree; |
531 | |
532 | tree = &folio_to_inode(folio)->io_tree; |
533 | extent_invalidate_folio(tree, folio, offset); |
534 | btree_release_folio(folio, GFP_NOFS); |
535 | if (folio_get_private(folio)) { |
536 | btrfs_warn(folio_to_fs_info(folio), |
537 | "folio private not zero on folio %llu" , |
538 | (unsigned long long)folio_pos(folio)); |
539 | folio_detach_private(folio); |
540 | } |
541 | } |
542 | |
543 | #ifdef DEBUG |
544 | static bool btree_dirty_folio(struct address_space *mapping, |
545 | struct folio *folio) |
546 | { |
547 | struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); |
548 | struct btrfs_subpage_info *spi = fs_info->subpage_info; |
549 | struct btrfs_subpage *subpage; |
550 | struct extent_buffer *eb; |
551 | int cur_bit = 0; |
552 | u64 page_start = folio_pos(folio); |
553 | |
554 | if (fs_info->sectorsize == PAGE_SIZE) { |
555 | eb = folio_get_private(folio); |
556 | BUG_ON(!eb); |
557 | BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); |
558 | BUG_ON(!atomic_read(&eb->refs)); |
559 | btrfs_assert_tree_write_locked(eb); |
560 | return filemap_dirty_folio(mapping, folio); |
561 | } |
562 | |
563 | ASSERT(spi); |
564 | subpage = folio_get_private(folio); |
565 | |
566 | for (cur_bit = spi->dirty_offset; |
567 | cur_bit < spi->dirty_offset + spi->bitmap_nr_bits; |
568 | cur_bit++) { |
569 | unsigned long flags; |
570 | u64 cur; |
571 | |
572 | spin_lock_irqsave(&subpage->lock, flags); |
573 | if (!test_bit(cur_bit, subpage->bitmaps)) { |
574 | spin_unlock_irqrestore(&subpage->lock, flags); |
575 | continue; |
576 | } |
577 | spin_unlock_irqrestore(&subpage->lock, flags); |
578 | cur = page_start + cur_bit * fs_info->sectorsize; |
579 | |
580 | eb = find_extent_buffer(fs_info, cur); |
581 | ASSERT(eb); |
582 | ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); |
583 | ASSERT(atomic_read(&eb->refs)); |
584 | btrfs_assert_tree_write_locked(eb); |
585 | free_extent_buffer(eb); |
586 | |
587 | cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1; |
588 | } |
589 | return filemap_dirty_folio(mapping, folio); |
590 | } |
591 | #else |
592 | #define btree_dirty_folio filemap_dirty_folio |
593 | #endif |
594 | |
595 | static const struct address_space_operations btree_aops = { |
596 | .writepages = btree_writepages, |
597 | .release_folio = btree_release_folio, |
598 | .invalidate_folio = btree_invalidate_folio, |
599 | .migrate_folio = btree_migrate_folio, |
600 | .dirty_folio = btree_dirty_folio, |
601 | }; |
602 | |
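/*
 * Get the extent buffer for the tree block at @bytenr, either from the buffer
 * cache or by allocating a new one.  Dummy filesystems used by the self-tests
 * go through the test-only allocator instead.
 */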
603 | struct extent_buffer *btrfs_find_create_tree_block( |
604 | struct btrfs_fs_info *fs_info, |
605 | u64 bytenr, u64 owner_root, |
606 | int level) |
607 | { |
608 | if (btrfs_is_testing(fs_info)) |
609 | return alloc_test_extent_buffer(fs_info, start: bytenr); |
610 | return alloc_extent_buffer(fs_info, start: bytenr, owner_root, level); |
611 | } |
612 | |
613 | /* |
614 | * Read the tree block at logical address @bytenr and do various basic but |
615 | * critical verifications. |
616 | * |
617 | * @check: expected tree parentness check, see comments of the |
618 | * structure for details. |
619 | */ |
620 | struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, |
621 | struct btrfs_tree_parent_check *check) |
622 | { |
623 | struct extent_buffer *buf = NULL; |
624 | int ret; |
625 | |
626 | ASSERT(check); |
627 | |
628 | buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root: check->owner_root, |
629 | level: check->level); |
630 | if (IS_ERR(ptr: buf)) |
631 | return buf; |
632 | |
633 | ret = btrfs_read_extent_buffer(eb: buf, check); |
634 | if (ret) { |
635 | free_extent_buffer_stale(eb: buf); |
636 | return ERR_PTR(error: ret); |
637 | } |
638 | if (btrfs_check_eb_owner(eb: buf, root_owner: check->owner_root)) { |
639 | free_extent_buffer_stale(eb: buf); |
640 | return ERR_PTR(error: -EUCLEAN); |
641 | } |
642 | return buf; |
644 | } |
645 | |
646 | static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, |
647 | u64 objectid) |
648 | { |
649 | bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state); |
650 | |
651 | memset(&root->root_key, 0, sizeof(root->root_key)); |
652 | memset(&root->root_item, 0, sizeof(root->root_item)); |
653 | memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); |
654 | root->fs_info = fs_info; |
655 | root->root_key.objectid = objectid; |
656 | root->node = NULL; |
657 | root->commit_root = NULL; |
658 | root->state = 0; |
659 | RB_CLEAR_NODE(&root->rb_node); |
660 | |
661 | root->last_trans = 0; |
662 | root->free_objectid = 0; |
663 | root->nr_delalloc_inodes = 0; |
664 | root->nr_ordered_extents = 0; |
665 | root->inode_tree = RB_ROOT; |
666 | /* GFP flags are compatible with XA_FLAGS_*. */ |
667 | xa_init_flags(xa: &root->delayed_nodes, GFP_ATOMIC); |
668 | |
669 | btrfs_init_root_block_rsv(root); |
670 | |
671 | INIT_LIST_HEAD(list: &root->dirty_list); |
672 | INIT_LIST_HEAD(list: &root->root_list); |
673 | INIT_LIST_HEAD(list: &root->delalloc_inodes); |
674 | INIT_LIST_HEAD(list: &root->delalloc_root); |
675 | INIT_LIST_HEAD(list: &root->ordered_extents); |
676 | INIT_LIST_HEAD(list: &root->ordered_root); |
677 | INIT_LIST_HEAD(list: &root->reloc_dirty_list); |
678 | spin_lock_init(&root->inode_lock); |
679 | spin_lock_init(&root->delalloc_lock); |
680 | spin_lock_init(&root->ordered_extent_lock); |
681 | spin_lock_init(&root->accounting_lock); |
682 | spin_lock_init(&root->qgroup_meta_rsv_lock); |
683 | mutex_init(&root->objectid_mutex); |
684 | mutex_init(&root->log_mutex); |
685 | mutex_init(&root->ordered_extent_mutex); |
686 | mutex_init(&root->delalloc_mutex); |
687 | init_waitqueue_head(&root->qgroup_flush_wait); |
688 | init_waitqueue_head(&root->log_writer_wait); |
689 | init_waitqueue_head(&root->log_commit_wait[0]); |
690 | init_waitqueue_head(&root->log_commit_wait[1]); |
691 | INIT_LIST_HEAD(list: &root->log_ctxs[0]); |
692 | INIT_LIST_HEAD(list: &root->log_ctxs[1]); |
693 | atomic_set(v: &root->log_commit[0], i: 0); |
694 | atomic_set(v: &root->log_commit[1], i: 0); |
695 | atomic_set(v: &root->log_writers, i: 0); |
696 | atomic_set(v: &root->log_batch, i: 0); |
697 | refcount_set(r: &root->refs, n: 1); |
698 | atomic_set(v: &root->snapshot_force_cow, i: 0); |
699 | atomic_set(v: &root->nr_swapfiles, i: 0); |
700 | btrfs_set_root_log_transid(root, log_transid: 0); |
701 | root->log_transid_committed = -1; |
702 | btrfs_set_root_last_log_commit(root, commit_id: 0); |
703 | root->anon_dev = 0; |
704 | if (!dummy) { |
705 | extent_io_tree_init(fs_info, tree: &root->dirty_log_pages, |
706 | owner: IO_TREE_ROOT_DIRTY_LOG_PAGES); |
707 | extent_io_tree_init(fs_info, tree: &root->log_csum_range, |
708 | owner: IO_TREE_LOG_CSUM_RANGE); |
709 | } |
710 | |
711 | spin_lock_init(&root->root_item_lock); |
712 | btrfs_qgroup_init_swapped_blocks(swapped_blocks: &root->swapped_blocks); |
713 | #ifdef CONFIG_BTRFS_DEBUG |
714 | INIT_LIST_HEAD(list: &root->leak_list); |
715 | spin_lock(lock: &fs_info->fs_roots_radix_lock); |
716 | list_add_tail(new: &root->leak_list, head: &fs_info->allocated_roots); |
717 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
718 | #endif |
719 | } |
720 | |
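/* Allocate a zeroed root structure and initialize its in-memory fields. */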
721 | static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, |
722 | u64 objectid, gfp_t flags) |
723 | { |
724 | struct btrfs_root *root = kzalloc(size: sizeof(*root), flags); |
725 | if (root) |
726 | __setup_root(root, fs_info, objectid); |
727 | return root; |
728 | } |
729 | |
730 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
731 | /* Should only be used by the testing infrastructure */ |
732 | struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info) |
733 | { |
734 | struct btrfs_root *root; |
735 | |
736 | if (!fs_info) |
737 | return ERR_PTR(error: -EINVAL); |
738 | |
739 | root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL); |
740 | if (!root) |
741 | return ERR_PTR(error: -ENOMEM); |
742 | |
743 | /* We don't use the stripesize in selftest, set it as sectorsize */ |
744 | root->alloc_bytenr = 0; |
745 | |
746 | return root; |
747 | } |
748 | #endif |
749 | |
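/*
 * rb-tree comparators for fs_info->global_root_tree, which keeps the global
 * (extent, csum, free space) roots ordered by their root key.
 */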
750 | static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node) |
751 | { |
752 | const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node); |
753 | const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node); |
754 | |
755 | return btrfs_comp_cpu_keys(k1: &a->root_key, k2: &b->root_key); |
756 | } |
757 | |
758 | static int global_root_key_cmp(const void *k, const struct rb_node *node) |
759 | { |
760 | const struct btrfs_key *key = k; |
761 | const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node); |
762 | |
763 | return btrfs_comp_cpu_keys(k1: key, k2: &root->root_key); |
764 | } |
765 | |
766 | int btrfs_global_root_insert(struct btrfs_root *root) |
767 | { |
768 | struct btrfs_fs_info *fs_info = root->fs_info; |
769 | struct rb_node *tmp; |
770 | int ret = 0; |
771 | |
772 | write_lock(&fs_info->global_root_lock); |
773 | tmp = rb_find_add(node: &root->rb_node, tree: &fs_info->global_root_tree, cmp: global_root_cmp); |
774 | write_unlock(&fs_info->global_root_lock); |
775 | |
776 | if (tmp) { |
777 | ret = -EEXIST; |
778 | btrfs_warn(fs_info, "global root %llu %llu already exists" , |
779 | root->root_key.objectid, root->root_key.offset); |
780 | } |
781 | return ret; |
782 | } |
783 | |
784 | void btrfs_global_root_delete(struct btrfs_root *root) |
785 | { |
786 | struct btrfs_fs_info *fs_info = root->fs_info; |
787 | |
788 | write_lock(&fs_info->global_root_lock); |
789 | rb_erase(&root->rb_node, &fs_info->global_root_tree); |
790 | write_unlock(&fs_info->global_root_lock); |
791 | } |
792 | |
793 | struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info, |
794 | struct btrfs_key *key) |
795 | { |
796 | struct rb_node *node; |
797 | struct btrfs_root *root = NULL; |
798 | |
799 | read_lock(&fs_info->global_root_lock); |
800 | node = rb_find(key, tree: &fs_info->global_root_tree, cmp: global_root_key_cmp); |
801 | if (node) |
802 | root = container_of(node, struct btrfs_root, rb_node); |
803 | read_unlock(&fs_info->global_root_lock); |
804 | |
805 | return root; |
806 | } |
807 | |
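/*
 * Map a logical address to the id (root_key.offset) of the global root set
 * that covers it.  Without the EXTENT_TREE_V2 incompat bit there is only one
 * set of global roots, so 0 is returned; otherwise the id comes from the
 * block group containing @bytenr (or the first block group if @bytenr is 0).
 */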
808 | static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr) |
809 | { |
810 | struct btrfs_block_group *block_group; |
811 | u64 ret; |
812 | |
813 | if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) |
814 | return 0; |
815 | |
816 | if (bytenr) |
817 | block_group = btrfs_lookup_block_group(info: fs_info, bytenr); |
818 | else |
819 | block_group = btrfs_lookup_first_block_group(info: fs_info, bytenr); |
820 | ASSERT(block_group); |
821 | if (!block_group) |
822 | return 0; |
823 | ret = block_group->global_root_id; |
824 | btrfs_put_block_group(cache: block_group); |
825 | |
826 | return ret; |
827 | } |
828 | |
829 | struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr) |
830 | { |
831 | struct btrfs_key key = { |
832 | .objectid = BTRFS_CSUM_TREE_OBJECTID, |
833 | .type = BTRFS_ROOT_ITEM_KEY, |
834 | .offset = btrfs_global_root_id(fs_info, bytenr), |
835 | }; |
836 | |
837 | return btrfs_global_root(fs_info, key: &key); |
838 | } |
839 | |
840 | struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr) |
841 | { |
842 | struct btrfs_key key = { |
843 | .objectid = BTRFS_EXTENT_TREE_OBJECTID, |
844 | .type = BTRFS_ROOT_ITEM_KEY, |
845 | .offset = btrfs_global_root_id(fs_info, bytenr), |
846 | }; |
847 | |
848 | return btrfs_global_root(fs_info, key: &key); |
849 | } |
850 | |
851 | struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info) |
852 | { |
853 | if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) |
854 | return fs_info->block_group_root; |
855 | return btrfs_extent_root(fs_info, bytenr: 0); |
856 | } |
857 | |
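/*
 * Create a new tree with an empty leaf as its root node and insert the
 * corresponding root item into the tree root.  The new root is returned with
 * a reference that the caller must drop with btrfs_put_root().
 */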
858 | struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, |
859 | u64 objectid) |
860 | { |
861 | struct btrfs_fs_info *fs_info = trans->fs_info; |
862 | struct extent_buffer *leaf; |
863 | struct btrfs_root *tree_root = fs_info->tree_root; |
864 | struct btrfs_root *root; |
865 | struct btrfs_key key; |
866 | unsigned int nofs_flag; |
867 | int ret = 0; |
868 | |
869 | /* |
870 | * We're holding a transaction handle, so use a NOFS memory allocation |
871 | * context to avoid deadlock if reclaim happens. |
872 | */ |
873 | nofs_flag = memalloc_nofs_save(); |
874 | root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL); |
875 | memalloc_nofs_restore(flags: nofs_flag); |
876 | if (!root) |
877 | return ERR_PTR(error: -ENOMEM); |
878 | |
879 | root->root_key.objectid = objectid; |
880 | root->root_key.type = BTRFS_ROOT_ITEM_KEY; |
881 | root->root_key.offset = 0; |
882 | |
883 | leaf = btrfs_alloc_tree_block(trans, root, parent: 0, root_objectid: objectid, NULL, level: 0, hint: 0, empty_size: 0, |
884 | reloc_src_root: 0, nest: BTRFS_NESTING_NORMAL); |
885 | if (IS_ERR(ptr: leaf)) { |
886 | ret = PTR_ERR(ptr: leaf); |
887 | leaf = NULL; |
888 | goto fail; |
889 | } |
890 | |
891 | root->node = leaf; |
892 | btrfs_mark_buffer_dirty(trans, buf: leaf); |
893 | |
894 | root->commit_root = btrfs_root_node(root); |
895 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
896 | |
897 | btrfs_set_root_flags(s: &root->root_item, val: 0); |
898 | btrfs_set_root_limit(s: &root->root_item, val: 0); |
899 | btrfs_set_root_bytenr(s: &root->root_item, val: leaf->start); |
900 | btrfs_set_root_generation(s: &root->root_item, val: trans->transid); |
901 | btrfs_set_root_level(s: &root->root_item, val: 0); |
902 | btrfs_set_root_refs(s: &root->root_item, val: 1); |
903 | btrfs_set_root_used(s: &root->root_item, val: leaf->len); |
904 | btrfs_set_root_last_snapshot(s: &root->root_item, val: 0); |
905 | btrfs_set_root_dirid(s: &root->root_item, val: 0); |
906 | if (is_fstree(rootid: objectid)) |
907 | generate_random_guid(guid: root->root_item.uuid); |
908 | else |
909 | export_guid(dst: root->root_item.uuid, src: &guid_null); |
910 | btrfs_set_root_drop_level(s: &root->root_item, val: 0); |
911 | |
912 | btrfs_tree_unlock(eb: leaf); |
913 | |
914 | key.objectid = objectid; |
915 | key.type = BTRFS_ROOT_ITEM_KEY; |
916 | key.offset = 0; |
917 | ret = btrfs_insert_root(trans, root: tree_root, key: &key, item: &root->root_item); |
918 | if (ret) |
919 | goto fail; |
920 | |
921 | return root; |
922 | |
923 | fail: |
924 | btrfs_put_root(root); |
925 | |
926 | return ERR_PTR(error: ret); |
927 | } |
928 | |
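/*
 * Allocate an in-memory root structure for a log tree.  The root node itself
 * is allocated separately via btrfs_alloc_log_tree_node(), which is delayed
 * on zoned filesystems.
 */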
929 | static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, |
930 | struct btrfs_fs_info *fs_info) |
931 | { |
932 | struct btrfs_root *root; |
933 | |
934 | root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS); |
935 | if (!root) |
936 | return ERR_PTR(error: -ENOMEM); |
937 | |
938 | root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; |
939 | root->root_key.type = BTRFS_ROOT_ITEM_KEY; |
940 | root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; |
941 | |
942 | return root; |
943 | } |
944 | |
945 | int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans, |
946 | struct btrfs_root *root) |
947 | { |
948 | struct extent_buffer *leaf; |
949 | |
950 | /* |
951 | * DON'T set SHAREABLE bit for log trees. |
952 | * |
953 | * Log trees are not exposed to user space thus can't be snapshotted, |
954 | * and they go away before a real commit is actually done. |
955 | * |
956 | * They do store pointers to file data extents, and those reference |
957 | * counts still get updated (along with back refs to the log tree). |
958 | */ |
959 | |
960 | leaf = btrfs_alloc_tree_block(trans, root, parent: 0, BTRFS_TREE_LOG_OBJECTID, |
961 | NULL, level: 0, hint: 0, empty_size: 0, reloc_src_root: 0, nest: BTRFS_NESTING_NORMAL); |
962 | if (IS_ERR(ptr: leaf)) |
963 | return PTR_ERR(ptr: leaf); |
964 | |
965 | root->node = leaf; |
966 | |
967 | btrfs_mark_buffer_dirty(trans, buf: root->node); |
968 | btrfs_tree_unlock(eb: root->node); |
969 | |
970 | return 0; |
971 | } |
972 | |
973 | int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, |
974 | struct btrfs_fs_info *fs_info) |
975 | { |
976 | struct btrfs_root *log_root; |
977 | |
978 | log_root = alloc_log_tree(trans, fs_info); |
979 | if (IS_ERR(ptr: log_root)) |
980 | return PTR_ERR(ptr: log_root); |
981 | |
982 | if (!btrfs_is_zoned(fs_info)) { |
983 | int ret = btrfs_alloc_log_tree_node(trans, root: log_root); |
984 | |
985 | if (ret) { |
986 | btrfs_put_root(root: log_root); |
987 | return ret; |
988 | } |
989 | } |
990 | |
991 | WARN_ON(fs_info->log_root_tree); |
992 | fs_info->log_root_tree = log_root; |
993 | return 0; |
994 | } |
995 | |
996 | int btrfs_add_log_tree(struct btrfs_trans_handle *trans, |
997 | struct btrfs_root *root) |
998 | { |
999 | struct btrfs_fs_info *fs_info = root->fs_info; |
1000 | struct btrfs_root *log_root; |
1001 | struct btrfs_inode_item *inode_item; |
1002 | int ret; |
1003 | |
1004 | log_root = alloc_log_tree(trans, fs_info); |
1005 | if (IS_ERR(ptr: log_root)) |
1006 | return PTR_ERR(ptr: log_root); |
1007 | |
1008 | ret = btrfs_alloc_log_tree_node(trans, root: log_root); |
1009 | if (ret) { |
1010 | btrfs_put_root(root: log_root); |
1011 | return ret; |
1012 | } |
1013 | |
1014 | log_root->last_trans = trans->transid; |
1015 | log_root->root_key.offset = root->root_key.objectid; |
1016 | |
1017 | inode_item = &log_root->root_item.inode; |
1018 | btrfs_set_stack_inode_generation(s: inode_item, val: 1); |
1019 | btrfs_set_stack_inode_size(s: inode_item, val: 3); |
1020 | btrfs_set_stack_inode_nlink(s: inode_item, val: 1); |
1021 | btrfs_set_stack_inode_nbytes(s: inode_item, |
1022 | val: fs_info->nodesize); |
1023 | btrfs_set_stack_inode_mode(s: inode_item, S_IFDIR | 0755); |
1024 | |
1025 | btrfs_set_root_node(item: &log_root->root_item, node: log_root->node); |
1026 | |
1027 | WARN_ON(root->log_root); |
1028 | root->log_root = log_root; |
1029 | btrfs_set_root_log_transid(root, log_transid: 0); |
1030 | root->log_transid_committed = -1; |
1031 | btrfs_set_root_last_log_commit(root, commit_id: 0); |
1032 | return 0; |
1033 | } |
1034 | |
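/*
 * Read a root from disk: look up its root item in @tree_root using the given
 * @path and @key, then read and verify the root node against the generation,
 * level and owner recorded in the item.
 */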
1035 | static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root, |
1036 | struct btrfs_path *path, |
1037 | struct btrfs_key *key) |
1038 | { |
1039 | struct btrfs_root *root; |
1040 | struct btrfs_tree_parent_check check = { 0 }; |
1041 | struct btrfs_fs_info *fs_info = tree_root->fs_info; |
1042 | u64 generation; |
1043 | int ret; |
1044 | int level; |
1045 | |
1046 | root = btrfs_alloc_root(fs_info, objectid: key->objectid, GFP_NOFS); |
1047 | if (!root) |
1048 | return ERR_PTR(error: -ENOMEM); |
1049 | |
1050 | ret = btrfs_find_root(root: tree_root, search_key: key, path, |
1051 | root_item: &root->root_item, root_key: &root->root_key); |
1052 | if (ret) { |
1053 | if (ret > 0) |
1054 | ret = -ENOENT; |
1055 | goto fail; |
1056 | } |
1057 | |
1058 | generation = btrfs_root_generation(s: &root->root_item); |
1059 | level = btrfs_root_level(s: &root->root_item); |
1060 | check.level = level; |
1061 | check.transid = generation; |
1062 | check.owner_root = key->objectid; |
1063 | root->node = read_tree_block(fs_info, bytenr: btrfs_root_bytenr(s: &root->root_item), |
1064 | check: &check); |
1065 | if (IS_ERR(ptr: root->node)) { |
1066 | ret = PTR_ERR(ptr: root->node); |
1067 | root->node = NULL; |
1068 | goto fail; |
1069 | } |
1070 | if (!btrfs_buffer_uptodate(eb: root->node, parent_transid: generation, atomic: 0)) { |
1071 | ret = -EIO; |
1072 | goto fail; |
1073 | } |
1074 | |
1075 | /* |
1076 | * For real fs, and not log/reloc trees, root owner must |
1077 | * match its root node owner |
1078 | */ |
1079 | if (!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state) && |
1080 | root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && |
1081 | root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && |
1082 | root->root_key.objectid != btrfs_header_owner(eb: root->node)) { |
1083 | btrfs_crit(fs_info, |
1084 | "root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu" , |
1085 | root->root_key.objectid, root->node->start, |
1086 | btrfs_header_owner(root->node), |
1087 | root->root_key.objectid); |
1088 | ret = -EUCLEAN; |
1089 | goto fail; |
1090 | } |
1091 | root->commit_root = btrfs_root_node(root); |
1092 | return root; |
1093 | fail: |
1094 | btrfs_put_root(root); |
1095 | return ERR_PTR(error: ret); |
1096 | } |
1097 | |
1098 | struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, |
1099 | struct btrfs_key *key) |
1100 | { |
1101 | struct btrfs_root *root; |
1102 | struct btrfs_path *path; |
1103 | |
1104 | path = btrfs_alloc_path(); |
1105 | if (!path) |
1106 | return ERR_PTR(error: -ENOMEM); |
1107 | root = read_tree_root_path(tree_root, path, key); |
1108 | btrfs_free_path(p: path); |
1109 | |
1110 | return root; |
1111 | } |
1112 | |
1113 | /* |
1114 | * Initialize subvolume root in-memory structure |
1115 | * |
1116 | * @anon_dev: anonymous device to attach to the root, if zero, allocate new |
1117 | */ |
1118 | static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) |
1119 | { |
1120 | int ret; |
1121 | |
1122 | btrfs_drew_lock_init(lock: &root->snapshot_lock); |
1123 | |
1124 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && |
1125 | !btrfs_is_data_reloc_root(root) && |
1126 | is_fstree(rootid: root->root_key.objectid)) { |
1127 | set_bit(nr: BTRFS_ROOT_SHAREABLE, addr: &root->state); |
1128 | btrfs_check_and_init_root_item(item: &root->root_item); |
1129 | } |
1130 | |
1131 | /* |
1132 | * Don't assign anonymous block device to roots that are not exposed to |
1133 | * userspace, the id pool is limited to 1M |
1134 | */ |
1135 | if (is_fstree(rootid: root->root_key.objectid) && |
1136 | btrfs_root_refs(s: &root->root_item) > 0) { |
1137 | if (!anon_dev) { |
1138 | ret = get_anon_bdev(&root->anon_dev); |
1139 | if (ret) |
1140 | goto fail; |
1141 | } else { |
1142 | root->anon_dev = anon_dev; |
1143 | } |
1144 | } |
1145 | |
1146 | mutex_lock(&root->objectid_mutex); |
1147 | ret = btrfs_init_root_free_objectid(root); |
1148 | if (ret) { |
1149 | mutex_unlock(lock: &root->objectid_mutex); |
1150 | goto fail; |
1151 | } |
1152 | |
1153 | ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID); |
1154 | |
1155 | mutex_unlock(lock: &root->objectid_mutex); |
1156 | |
1157 | return 0; |
1158 | fail: |
1159 | /* The caller is responsible to call btrfs_free_fs_root */ |
1160 | return ret; |
1161 | } |
1162 | |
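/*
 * Look up a root in the fs_roots radix tree cache and return it with an extra
 * reference, or NULL if it is not cached.
 */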
1163 | static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, |
1164 | u64 root_id) |
1165 | { |
1166 | struct btrfs_root *root; |
1167 | |
1168 | spin_lock(lock: &fs_info->fs_roots_radix_lock); |
1169 | root = radix_tree_lookup(&fs_info->fs_roots_radix, |
1170 | (unsigned long)root_id); |
1171 | root = btrfs_grab_root(root); |
1172 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
1173 | return root; |
1174 | } |
1175 | |
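/*
 * Return a referenced root for @objectid if it is one of the trees kept
 * directly in fs_info (tree root, extent, chunk, dev, csum, quota, uuid,
 * block group, free space, raid stripe), otherwise NULL.
 */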
1176 | static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info, |
1177 | u64 objectid) |
1178 | { |
1179 | struct btrfs_key key = { |
1180 | .objectid = objectid, |
1181 | .type = BTRFS_ROOT_ITEM_KEY, |
1182 | .offset = 0, |
1183 | }; |
1184 | |
1185 | switch (objectid) { |
1186 | case BTRFS_ROOT_TREE_OBJECTID: |
1187 | return btrfs_grab_root(root: fs_info->tree_root); |
1188 | case BTRFS_EXTENT_TREE_OBJECTID: |
1189 | return btrfs_grab_root(root: btrfs_global_root(fs_info, key: &key)); |
1190 | case BTRFS_CHUNK_TREE_OBJECTID: |
1191 | return btrfs_grab_root(root: fs_info->chunk_root); |
1192 | case BTRFS_DEV_TREE_OBJECTID: |
1193 | return btrfs_grab_root(root: fs_info->dev_root); |
1194 | case BTRFS_CSUM_TREE_OBJECTID: |
1195 | return btrfs_grab_root(root: btrfs_global_root(fs_info, key: &key)); |
1196 | case BTRFS_QUOTA_TREE_OBJECTID: |
1197 | return btrfs_grab_root(root: fs_info->quota_root); |
1198 | case BTRFS_UUID_TREE_OBJECTID: |
1199 | return btrfs_grab_root(root: fs_info->uuid_root); |
1200 | case BTRFS_BLOCK_GROUP_TREE_OBJECTID: |
1201 | return btrfs_grab_root(root: fs_info->block_group_root); |
1202 | case BTRFS_FREE_SPACE_TREE_OBJECTID: |
1203 | return btrfs_grab_root(root: btrfs_global_root(fs_info, key: &key)); |
1204 | case BTRFS_RAID_STRIPE_TREE_OBJECTID: |
1205 | return btrfs_grab_root(root: fs_info->stripe_root); |
1206 | default: |
1207 | return NULL; |
1208 | } |
1209 | } |
1210 | |
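/*
 * Insert a root into the fs_roots radix tree cache, taking an extra reference
 * on success.  Returns -EEXIST if a root with the same objectid is already
 * cached.
 */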
1211 | int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, |
1212 | struct btrfs_root *root) |
1213 | { |
1214 | int ret; |
1215 | |
1216 | ret = radix_tree_preload(GFP_NOFS); |
1217 | if (ret) |
1218 | return ret; |
1219 | |
1220 | spin_lock(lock: &fs_info->fs_roots_radix_lock); |
1221 | ret = radix_tree_insert(&fs_info->fs_roots_radix, |
1222 | index: (unsigned long)root->root_key.objectid, |
1223 | root); |
1224 | if (ret == 0) { |
1225 | btrfs_grab_root(root); |
1226 | set_bit(nr: BTRFS_ROOT_IN_RADIX, addr: &root->state); |
1227 | } |
1228 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
1229 | radix_tree_preload_end(); |
1230 | |
1231 | return ret; |
1232 | } |
1233 | |
1234 | void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info) |
1235 | { |
1236 | #ifdef CONFIG_BTRFS_DEBUG |
1237 | struct btrfs_root *root; |
1238 | |
1239 | while (!list_empty(head: &fs_info->allocated_roots)) { |
1240 | char buf[BTRFS_ROOT_NAME_BUF_LEN]; |
1241 | |
1242 | root = list_first_entry(&fs_info->allocated_roots, |
1243 | struct btrfs_root, leak_list); |
1244 | btrfs_err(fs_info, "leaked root %s refcount %d" , |
1245 | btrfs_root_name(&root->root_key, buf), |
1246 | refcount_read(&root->refs)); |
1247 | WARN_ON_ONCE(1); |
1248 | while (refcount_read(r: &root->refs) > 1) |
1249 | btrfs_put_root(root); |
1250 | btrfs_put_root(root); |
1251 | } |
1252 | #endif |
1253 | } |
1254 | |
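/* Drop every root cached in the global root rb-tree. */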
1255 | static void free_global_roots(struct btrfs_fs_info *fs_info) |
1256 | { |
1257 | struct btrfs_root *root; |
1258 | struct rb_node *node; |
1259 | |
1260 | while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) { |
1261 | root = rb_entry(node, struct btrfs_root, rb_node); |
1262 | rb_erase(&root->rb_node, &fs_info->global_root_tree); |
1263 | btrfs_put_root(root); |
1264 | } |
1265 | } |
1266 | |
1267 | void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) |
1268 | { |
1269 | percpu_counter_destroy(fbc: &fs_info->dirty_metadata_bytes); |
1270 | percpu_counter_destroy(fbc: &fs_info->delalloc_bytes); |
1271 | percpu_counter_destroy(fbc: &fs_info->ordered_bytes); |
1272 | percpu_counter_destroy(fbc: &fs_info->dev_replace.bio_counter); |
1273 | btrfs_free_csum_hash(fs_info); |
1274 | btrfs_free_stripe_hash_table(info: fs_info); |
1275 | btrfs_free_ref_cache(fs_info); |
1276 | kfree(objp: fs_info->balance_ctl); |
1277 | kfree(objp: fs_info->delayed_root); |
1278 | free_global_roots(fs_info); |
1279 | btrfs_put_root(root: fs_info->tree_root); |
1280 | btrfs_put_root(root: fs_info->chunk_root); |
1281 | btrfs_put_root(root: fs_info->dev_root); |
1282 | btrfs_put_root(root: fs_info->quota_root); |
1283 | btrfs_put_root(root: fs_info->uuid_root); |
1284 | btrfs_put_root(root: fs_info->fs_root); |
1285 | btrfs_put_root(root: fs_info->data_reloc_root); |
1286 | btrfs_put_root(root: fs_info->block_group_root); |
1287 | btrfs_put_root(root: fs_info->stripe_root); |
1288 | btrfs_check_leaked_roots(fs_info); |
1289 | btrfs_extent_buffer_leak_debug_check(fs_info); |
1290 | kfree(objp: fs_info->super_copy); |
1291 | kfree(objp: fs_info->super_for_commit); |
1292 | kfree(objp: fs_info->subpage_info); |
1293 | kvfree(addr: fs_info); |
1294 | } |
1295 | |
1297 | /* |
1298 | * Get an in-memory reference of a root structure. |
1299 | * |
1300 | * For essential trees like root/extent tree, we grab it from fs_info directly. |
1301 | * For subvolume trees, we check the cached filesystem roots first. If not |
1302 | * found, then read it from disk and add it to cached fs roots. |
1303 | * |
1304 | * Caller should release the root by calling btrfs_put_root() after the usage. |
1305 | * |
1306 | * NOTE: Reloc and log trees can't be read by this function as they share the |
1307 | * same root objectid. |
1308 | * |
1309 | * @objectid: root id |
1310 | * @anon_dev: preallocated anonymous block device number for new roots, |
1311 | * pass NULL for a new allocation. |
1312 | * @check_ref: whether to check root item references; if true, return -ENOENT |
1313 | * for orphan roots |
1314 | */ |
1315 | static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, |
1316 | u64 objectid, dev_t *anon_dev, |
1317 | bool check_ref) |
1318 | { |
1319 | struct btrfs_root *root; |
1320 | struct btrfs_path *path; |
1321 | struct btrfs_key key; |
1322 | int ret; |
1323 | |
1324 | root = btrfs_get_global_root(fs_info, objectid); |
1325 | if (root) |
1326 | return root; |
1327 | |
1328 | /* |
1329 | * If we're called for non-subvolume trees, and above function didn't |
1330 | * find one, do not try to read it from disk. |
1331 | * |
1332 | * This is namely for free-space-tree and quota tree, which can change |
1333 | * at runtime and should only be grabbed from fs_info. |
1334 | */ |
1335 | if (!is_fstree(rootid: objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) |
1336 | return ERR_PTR(error: -ENOENT); |
1337 | again: |
1338 | root = btrfs_lookup_fs_root(fs_info, root_id: objectid); |
1339 | if (root) { |
1340 | /* |
1341 | * Some other caller may have read out the newly inserted |
1342 | * subvolume already (for things like backref walk etc). Not |
1343 | * that common but still possible. In that case, we just need |
1344 | * to free the anon_dev. |
1345 | */ |
1346 | if (unlikely(anon_dev && *anon_dev)) { |
1347 | free_anon_bdev(*anon_dev); |
1348 | *anon_dev = 0; |
1349 | } |
1350 | |
1351 | if (check_ref && btrfs_root_refs(s: &root->root_item) == 0) { |
1352 | btrfs_put_root(root); |
1353 | return ERR_PTR(error: -ENOENT); |
1354 | } |
1355 | return root; |
1356 | } |
1357 | |
1358 | key.objectid = objectid; |
1359 | key.type = BTRFS_ROOT_ITEM_KEY; |
1360 | key.offset = (u64)-1; |
1361 | root = btrfs_read_tree_root(tree_root: fs_info->tree_root, key: &key); |
1362 | if (IS_ERR(ptr: root)) |
1363 | return root; |
1364 | |
1365 | if (check_ref && btrfs_root_refs(s: &root->root_item) == 0) { |
1366 | ret = -ENOENT; |
1367 | goto fail; |
1368 | } |
1369 | |
1370 | ret = btrfs_init_fs_root(root, anon_dev: anon_dev ? *anon_dev : 0); |
1371 | if (ret) |
1372 | goto fail; |
1373 | |
1374 | path = btrfs_alloc_path(); |
1375 | if (!path) { |
1376 | ret = -ENOMEM; |
1377 | goto fail; |
1378 | } |
1379 | key.objectid = BTRFS_ORPHAN_OBJECTID; |
1380 | key.type = BTRFS_ORPHAN_ITEM_KEY; |
1381 | key.offset = objectid; |
1382 | |
1383 | ret = btrfs_search_slot(NULL, root: fs_info->tree_root, key: &key, p: path, ins_len: 0, cow: 0); |
1384 | btrfs_free_path(p: path); |
1385 | if (ret < 0) |
1386 | goto fail; |
1387 | if (ret == 0) |
1388 | set_bit(nr: BTRFS_ROOT_ORPHAN_ITEM_INSERTED, addr: &root->state); |
1389 | |
1390 | ret = btrfs_insert_fs_root(fs_info, root); |
1391 | if (ret) { |
1392 | if (ret == -EEXIST) { |
1393 | btrfs_put_root(root); |
1394 | goto again; |
1395 | } |
1396 | goto fail; |
1397 | } |
1398 | return root; |
1399 | fail: |
1400 | /* |
1401 | * If our caller provided us an anonymous device, then it's the caller's |
1402 | * responsibility to free it in case we fail. So we have to set our |
1403 | * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root() |
1404 | * and once again by our caller. |
1405 | */ |
1406 | if (anon_dev && *anon_dev) |
1407 | root->anon_dev = 0; |
1408 | btrfs_put_root(root); |
1409 | return ERR_PTR(error: ret); |
1410 | } |
1411 | |
1412 | /* |
1413 | * Get in-memory reference of a root structure |
1414 | * |
1415 | * @objectid: tree objectid |
1416 | * @check_ref: if set, verify that the tree exists and the item has at least |
1417 | * one reference |
1418 | */ |
1419 | struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, |
1420 | u64 objectid, bool check_ref) |
1421 | { |
1422 | return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref); |
1423 | } |
1424 | |
1425 | /* |
1426 | * Get in-memory reference of a root structure, created as new, optionally pass |
1427 | * the anonymous block device id |
1428 | * |
1429 | * @objectid: tree objectid |
1430 | * @anon_dev: if NULL, allocate a new anonymous block device, otherwise |
1431 | * use the device number it points to |
1432 | */ |
1433 | struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, |
1434 | u64 objectid, dev_t *anon_dev) |
1435 | { |
1436 | return btrfs_get_root_ref(fs_info, objectid, anon_dev, check_ref: true); |
1437 | } |
1438 | |
1439 | /* |
1440 | * Return a root for the given objectid. |
1441 | * |
1442 | * @fs_info: the fs_info |
1443 | * @objectid: the objectid we need to lookup |
1444 | * |
1445 | * This is exclusively used for backref walking, and exists specifically because |
1446 | * of how qgroups does lookups. Qgroups will do a backref lookup at delayed ref |
1447 | * creation time, which means we may have to read the tree_root in order to look |
1448 | * up a fs root that is not in memory. If the root is not in memory we will |
1449 | * read the tree root commit root and look up the fs root from there. This is a |
1450 | * temporary root, it will not be inserted into the radix tree as it doesn't |
1451 | * have the most uptodate information, it'll simply be discarded once the |
1452 | * backref code is finished using the root. |
1453 | */ |
1454 | struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info, |
1455 | struct btrfs_path *path, |
1456 | u64 objectid) |
1457 | { |
1458 | struct btrfs_root *root; |
1459 | struct btrfs_key key; |
1460 | |
1461 | ASSERT(path->search_commit_root && path->skip_locking); |
1462 | |
1463 | /* |
1464 | * This can return -ENOENT if we ask for a root that doesn't exist, but |
1465 | * since this is called via the backref walking code we won't be looking |
1466 | * up a root that doesn't exist, unless there's corruption. So if root |
1467 | * != NULL just return it. |
1468 | */ |
1469 | root = btrfs_get_global_root(fs_info, objectid); |
1470 | if (root) |
1471 | return root; |
1472 | |
1473 | root = btrfs_lookup_fs_root(fs_info, root_id: objectid); |
1474 | if (root) |
1475 | return root; |
1476 | |
1477 | key.objectid = objectid; |
1478 | key.type = BTRFS_ROOT_ITEM_KEY; |
1479 | key.offset = (u64)-1; |
1480 | root = read_tree_root_path(tree_root: fs_info->tree_root, path, key: &key); |
1481 | btrfs_release_path(p: path); |
1482 | |
1483 | return root; |
1484 | } |
1485 | |
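/*
 * Background thread doing periodic cleanup work: running delayed iputs,
 * dropping deleted snapshots, defragging inodes and deleting or reclaiming
 * unused block groups.  It backs off early whenever btrfs_need_cleaner_sleep()
 * says so or while the filesystem has not finished mounting.
 */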
1486 | static int cleaner_kthread(void *arg) |
1487 | { |
1488 | struct btrfs_fs_info *fs_info = arg; |
1489 | int again; |
1490 | |
1491 | while (1) { |
1492 | again = 0; |
1493 | |
1494 | set_bit(nr: BTRFS_FS_CLEANER_RUNNING, addr: &fs_info->flags); |
1495 | |
1496 | /* Make the cleaner go to sleep early. */ |
1497 | if (btrfs_need_cleaner_sleep(fs_info)) |
1498 | goto sleep; |
1499 | |
1500 | /* |
1501 | * Do not do anything if we might cause open_ctree() to block |
1502 | * before we have finished mounting the filesystem. |
1503 | */ |
1504 | if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) |
1505 | goto sleep; |
1506 | |
1507 | if (!mutex_trylock(lock: &fs_info->cleaner_mutex)) |
1508 | goto sleep; |
1509 | |
1510 | /* |
1511 | * Avoid the problem that we change the status of the fs |
1512 | * during the above check and trylock. |
1513 | */ |
1514 | if (btrfs_need_cleaner_sleep(fs_info)) { |
1515 | mutex_unlock(lock: &fs_info->cleaner_mutex); |
1516 | goto sleep; |
1517 | } |
1518 | |
1519 | if (test_and_clear_bit(nr: BTRFS_FS_FEATURE_CHANGED, addr: &fs_info->flags)) |
1520 | btrfs_sysfs_feature_update(fs_info); |
1521 | |
1522 | btrfs_run_delayed_iputs(fs_info); |
1523 | |
1524 | again = btrfs_clean_one_deleted_snapshot(fs_info); |
1525 | mutex_unlock(lock: &fs_info->cleaner_mutex); |
1526 | |
1527 | /* |
1528 | * The defragger has dealt with the R/O remount and umount, |
1529 | * needn't do anything special here. |
1530 | */ |
1531 | btrfs_run_defrag_inodes(fs_info); |
1532 | |
1533 | /* |
1534 | * Acquires fs_info->reclaim_bgs_lock to avoid racing |
1535 | * with relocation (btrfs_relocate_chunk) and relocation |
1536 | * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) |
1537 | * after acquiring fs_info->reclaim_bgs_lock. So we |
1538 | * can't hold, nor need to, fs_info->cleaner_mutex when deleting |
1539 | * unused block groups. |
1540 | */ |
1541 | btrfs_delete_unused_bgs(fs_info); |
1542 | |
1543 | /* |
1544 | * Reclaim block groups in the reclaim_bgs list after we deleted |
1545 | * all unused block_groups. This possibly gives us some more free |
1546 | * space. |
1547 | */ |
1548 | btrfs_reclaim_bgs(fs_info); |
1549 | sleep: |
1550 | clear_and_wake_up_bit(bit: BTRFS_FS_CLEANER_RUNNING, word: &fs_info->flags); |
1551 | if (kthread_should_park()) |
1552 | kthread_parkme(); |
1553 | if (kthread_should_stop()) |
1554 | return 0; |
1555 | if (!again) { |
1556 | set_current_state(TASK_INTERRUPTIBLE); |
1557 | schedule(); |
1558 | __set_current_state(TASK_RUNNING); |
1559 | } |
1560 | } |
1561 | } |
1562 | |
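/*
 * Background thread that commits the running transaction once it is older
 * than fs_info->commit_interval or when a commit was explicitly requested
 * (BTRFS_FS_COMMIT_TRANS), then wakes the cleaner thread.
 */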
1563 | static int transaction_kthread(void *arg) |
1564 | { |
1565 | struct btrfs_root *root = arg; |
1566 | struct btrfs_fs_info *fs_info = root->fs_info; |
1567 | struct btrfs_trans_handle *trans; |
1568 | struct btrfs_transaction *cur; |
1569 | u64 transid; |
1570 | time64_t delta; |
1571 | unsigned long delay; |
1572 | bool cannot_commit; |
1573 | |
1574 | do { |
1575 | cannot_commit = false; |
1576 | delay = msecs_to_jiffies(m: fs_info->commit_interval * 1000); |
1577 | mutex_lock(&fs_info->transaction_kthread_mutex); |
1578 | |
1579 | spin_lock(lock: &fs_info->trans_lock); |
1580 | cur = fs_info->running_transaction; |
1581 | if (!cur) { |
1582 | spin_unlock(lock: &fs_info->trans_lock); |
1583 | goto sleep; |
1584 | } |
1585 | |
1586 | delta = ktime_get_seconds() - cur->start_time; |
1587 | if (!test_and_clear_bit(nr: BTRFS_FS_COMMIT_TRANS, addr: &fs_info->flags) && |
1588 | cur->state < TRANS_STATE_COMMIT_PREP && |
1589 | delta < fs_info->commit_interval) { |
1590 | spin_unlock(lock: &fs_info->trans_lock); |
1591 | delay -= msecs_to_jiffies(m: (delta - 1) * 1000); |
1592 | delay = min(delay, |
1593 | msecs_to_jiffies(fs_info->commit_interval * 1000)); |
1594 | goto sleep; |
1595 | } |
1596 | transid = cur->transid; |
1597 | spin_unlock(lock: &fs_info->trans_lock); |
1598 | |
1599 | /* If the file system is aborted, this will always fail. */ |
1600 | trans = btrfs_attach_transaction(root); |
1601 | if (IS_ERR(ptr: trans)) { |
1602 | if (PTR_ERR(ptr: trans) != -ENOENT) |
1603 | cannot_commit = true; |
1604 | goto sleep; |
1605 | } |
1606 | if (transid == trans->transid) { |
1607 | btrfs_commit_transaction(trans); |
1608 | } else { |
1609 | btrfs_end_transaction(trans); |
1610 | } |
1611 | sleep: |
1612 | wake_up_process(tsk: fs_info->cleaner_kthread); |
1613 | mutex_unlock(lock: &fs_info->transaction_kthread_mutex); |
1614 | |
1615 | if (BTRFS_FS_ERROR(fs_info)) |
1616 | btrfs_cleanup_transaction(fs_info); |
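/*
 * Skip the sleep when a transaction is blocked waiting for a commit
 * and we were able to attach to it, so the commit isn't delayed by a
 * full commit interval.
 */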
1617 | if (!kthread_should_stop() && |
1618 | (!btrfs_transaction_blocked(info: fs_info) || |
1619 | cannot_commit)) |
1620 | schedule_timeout_interruptible(timeout: delay); |
1621 | } while (!kthread_should_stop()); |
1622 | return 0; |
1623 | } |
1624 | |
1625 | /* |
1626 | * This will find the highest generation in the array of root backups. The |
1627 | * index of the highest array is returned, or -EINVAL if we can't find |
1628 | * anything. |
1629 | * |
1630 | * We check to make sure the array is valid by comparing the |
1631 | * generation of the latest root in the array with the generation |
1632 | * in the super block. If they don't match we pitch it. |
1633 | */ |
1634 | static int find_newest_super_backup(struct btrfs_fs_info *info) |
1635 | { |
1636 | const u64 newest_gen = btrfs_super_generation(s: info->super_copy); |
1637 | u64 cur; |
1638 | struct btrfs_root_backup *root_backup; |
1639 | int i; |
1640 | |
1641 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { |
1642 | root_backup = info->super_copy->super_roots + i; |
1643 | cur = btrfs_backup_tree_root_gen(s: root_backup); |
1644 | if (cur == newest_gen) |
1645 | return i; |
1646 | } |
1647 | |
1648 | return -EINVAL; |
1649 | } |
1650 | |
1651 | /* |
1652 | * copy all the root pointers into the super backup array. |
1653 | * this will bump the backup pointer by one when it is |
1654 | * done |
1655 | */ |
1656 | static void backup_super_roots(struct btrfs_fs_info *info) |
1657 | { |
1658 | const int next_backup = info->backup_root_index; |
1659 | struct btrfs_root_backup *root_backup; |
1660 | |
1661 | root_backup = info->super_for_commit->super_roots + next_backup; |
1662 | |
1663 | /* |
1664 | * make sure all of our padding and empty slots get zero filled |
1665 | * regardless of which ones we use today |
1666 | */ |
1667 | memset(root_backup, 0, sizeof(*root_backup)); |
1668 | |
1669 | info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; |
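/*
 * The slots form a ring: this commit fills slot 'next_backup' and the
 * next one will use the following slot, so the array ends up holding
 * the roots of the last BTRFS_NUM_BACKUP_ROOTS commits.
 */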
1670 | |
1671 | btrfs_set_backup_tree_root(s: root_backup, val: info->tree_root->node->start); |
1672 | btrfs_set_backup_tree_root_gen(s: root_backup, |
1673 | val: btrfs_header_generation(eb: info->tree_root->node)); |
1674 | |
1675 | btrfs_set_backup_tree_root_level(s: root_backup, |
1676 | val: btrfs_header_level(eb: info->tree_root->node)); |
1677 | |
1678 | btrfs_set_backup_chunk_root(s: root_backup, val: info->chunk_root->node->start); |
1679 | btrfs_set_backup_chunk_root_gen(s: root_backup, |
1680 | val: btrfs_header_generation(eb: info->chunk_root->node)); |
1681 | btrfs_set_backup_chunk_root_level(s: root_backup, |
1682 | val: btrfs_header_level(eb: info->chunk_root->node)); |
1683 | |
1684 | if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) { |
1685 | struct btrfs_root *extent_root = btrfs_extent_root(fs_info: info, bytenr: 0); |
1686 | struct btrfs_root *csum_root = btrfs_csum_root(fs_info: info, bytenr: 0); |
1687 | |
1688 | btrfs_set_backup_extent_root(s: root_backup, |
1689 | val: extent_root->node->start); |
1690 | btrfs_set_backup_extent_root_gen(s: root_backup, |
1691 | val: btrfs_header_generation(eb: extent_root->node)); |
1692 | btrfs_set_backup_extent_root_level(s: root_backup, |
1693 | val: btrfs_header_level(eb: extent_root->node)); |
1694 | |
1695 | btrfs_set_backup_csum_root(s: root_backup, val: csum_root->node->start); |
1696 | btrfs_set_backup_csum_root_gen(s: root_backup, |
1697 | val: btrfs_header_generation(eb: csum_root->node)); |
1698 | btrfs_set_backup_csum_root_level(s: root_backup, |
1699 | val: btrfs_header_level(eb: csum_root->node)); |
1700 | } |
1701 | |
1702 | /* |
1703 | * we might commit during log recovery, which happens before we set |
1704 | * the fs_root. Make sure it is valid before we fill it in. |
1705 | */ |
1706 | if (info->fs_root && info->fs_root->node) { |
1707 | btrfs_set_backup_fs_root(s: root_backup, |
1708 | val: info->fs_root->node->start); |
1709 | btrfs_set_backup_fs_root_gen(s: root_backup, |
1710 | val: btrfs_header_generation(eb: info->fs_root->node)); |
1711 | btrfs_set_backup_fs_root_level(s: root_backup, |
1712 | val: btrfs_header_level(eb: info->fs_root->node)); |
1713 | } |
1714 | |
1715 | btrfs_set_backup_dev_root(s: root_backup, val: info->dev_root->node->start); |
1716 | btrfs_set_backup_dev_root_gen(s: root_backup, |
1717 | val: btrfs_header_generation(eb: info->dev_root->node)); |
1718 | btrfs_set_backup_dev_root_level(s: root_backup, |
1719 | val: btrfs_header_level(eb: info->dev_root->node)); |
1720 | |
1721 | btrfs_set_backup_total_bytes(s: root_backup, |
1722 | val: btrfs_super_total_bytes(s: info->super_copy)); |
1723 | btrfs_set_backup_bytes_used(s: root_backup, |
1724 | val: btrfs_super_bytes_used(s: info->super_copy)); |
1725 | btrfs_set_backup_num_devices(s: root_backup, |
1726 | val: btrfs_super_num_devices(s: info->super_copy)); |
1727 | |
1728 | /* |
1729 | * if we don't copy this out to the super_copy, it won't get remembered |
1730 | * for the next commit |
1731 | */ |
1732 | memcpy(&info->super_copy->super_roots, |
1733 | &info->super_for_commit->super_roots, |
1734 | sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); |
1735 | } |
1736 | |
1737 | /* |
1738 | * Reads a backup root based on the passed priority. Prio 0 is the newest, prio |
1739 | * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots |
1740 | * |
1741 | * @fs_info: filesystem whose backup roots need to be read |
1742 | * @priority: priority of backup root required |
1743 | * |
1744 | * Returns backup root index on success and -EINVAL otherwise. |
1745 | */ |
1746 | static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) |
1747 | { |
1748 | int backup_index = find_newest_super_backup(info: fs_info); |
1749 | struct btrfs_super_block *super = fs_info->super_copy; |
1750 | struct btrfs_root_backup *root_backup; |
1751 | |
1752 | if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { |
1753 | if (priority == 0) |
1754 | return backup_index; |
1755 | |
1756 | backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority; |
1757 | backup_index %= BTRFS_NUM_BACKUP_ROOTS; |
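/*
 * The arithmetic above walks backwards from the newest slot: e.g. with
 * 4 backup roots, the newest at slot 2 and priority 1, we get
 * (2 + 4 - 1) % 4 = 1, the slot written just before the newest one.
 */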
1758 | } else { |
1759 | return -EINVAL; |
1760 | } |
1761 | |
1762 | root_backup = super->super_roots + backup_index; |
1763 | |
1764 | btrfs_set_super_generation(s: super, |
1765 | val: btrfs_backup_tree_root_gen(s: root_backup)); |
1766 | btrfs_set_super_root(s: super, val: btrfs_backup_tree_root(s: root_backup)); |
1767 | btrfs_set_super_root_level(s: super, |
1768 | val: btrfs_backup_tree_root_level(s: root_backup)); |
1769 | btrfs_set_super_bytes_used(s: super, val: btrfs_backup_bytes_used(s: root_backup)); |
1770 | |
1771 | /* |
1772 | * Fixme: the total bytes and num_devices need to match or we should |
1773 | * need a fsck |
1774 | */ |
1775 | btrfs_set_super_total_bytes(s: super, val: btrfs_backup_total_bytes(s: root_backup)); |
1776 | btrfs_set_super_num_devices(s: super, val: btrfs_backup_num_devices(s: root_backup)); |
1777 | |
1778 | return backup_index; |
1779 | } |
1780 | |
1781 | /* helper to cleanup workers */ |
1782 | static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) |
1783 | { |
1784 | btrfs_destroy_workqueue(wq: fs_info->fixup_workers); |
1785 | btrfs_destroy_workqueue(wq: fs_info->delalloc_workers); |
1786 | btrfs_destroy_workqueue(wq: fs_info->workers); |
1787 | if (fs_info->endio_workers) |
1788 | destroy_workqueue(wq: fs_info->endio_workers); |
1789 | if (fs_info->rmw_workers) |
1790 | destroy_workqueue(wq: fs_info->rmw_workers); |
1791 | if (fs_info->compressed_write_workers) |
1792 | destroy_workqueue(wq: fs_info->compressed_write_workers); |
1793 | btrfs_destroy_workqueue(wq: fs_info->endio_write_workers); |
1794 | btrfs_destroy_workqueue(wq: fs_info->endio_freespace_worker); |
1795 | btrfs_destroy_workqueue(wq: fs_info->delayed_workers); |
1796 | btrfs_destroy_workqueue(wq: fs_info->caching_workers); |
1797 | btrfs_destroy_workqueue(wq: fs_info->flush_workers); |
1798 | btrfs_destroy_workqueue(wq: fs_info->qgroup_rescan_workers); |
1799 | if (fs_info->discard_ctl.discard_workers) |
1800 | destroy_workqueue(wq: fs_info->discard_ctl.discard_workers); |
1801 | /* |
1802 | * Now that all other work queues are destroyed, we can safely destroy |
1803 | * the queues used for metadata I/O, since tasks from those other work |
1804 | * queues can do metadata I/O operations. |
1805 | */ |
1806 | if (fs_info->endio_meta_workers) |
1807 | destroy_workqueue(wq: fs_info->endio_meta_workers); |
1808 | } |
1809 | |
1810 | static void free_root_extent_buffers(struct btrfs_root *root) |
1811 | { |
1812 | if (root) { |
1813 | free_extent_buffer(eb: root->node); |
1814 | free_extent_buffer(eb: root->commit_root); |
1815 | root->node = NULL; |
1816 | root->commit_root = NULL; |
1817 | } |
1818 | } |
1819 | |
1820 | static void free_global_root_pointers(struct btrfs_fs_info *fs_info) |
1821 | { |
1822 | struct btrfs_root *root, *tmp; |
1823 | |
1824 | rbtree_postorder_for_each_entry_safe(root, tmp, |
1825 | &fs_info->global_root_tree, |
1826 | rb_node) |
1827 | free_root_extent_buffers(root); |
1828 | } |
1829 | |
1830 | /* helper to cleanup tree roots */ |
1831 | static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) |
1832 | { |
1833 | free_root_extent_buffers(root: info->tree_root); |
1834 | |
1835 | free_global_root_pointers(fs_info: info); |
1836 | free_root_extent_buffers(root: info->dev_root); |
1837 | free_root_extent_buffers(root: info->quota_root); |
1838 | free_root_extent_buffers(root: info->uuid_root); |
1839 | free_root_extent_buffers(root: info->fs_root); |
1840 | free_root_extent_buffers(root: info->data_reloc_root); |
1841 | free_root_extent_buffers(root: info->block_group_root); |
1842 | free_root_extent_buffers(root: info->stripe_root); |
1843 | if (free_chunk_root) |
1844 | free_root_extent_buffers(root: info->chunk_root); |
1845 | } |
1846 | |
1847 | void btrfs_put_root(struct btrfs_root *root) |
1848 | { |
1849 | if (!root) |
1850 | return; |
1851 | |
1852 | if (refcount_dec_and_test(r: &root->refs)) { |
1853 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); |
1854 | WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)); |
1855 | if (root->anon_dev) |
1856 | free_anon_bdev(root->anon_dev); |
1857 | free_root_extent_buffers(root); |
1858 | #ifdef CONFIG_BTRFS_DEBUG |
1859 | spin_lock(lock: &root->fs_info->fs_roots_radix_lock); |
1860 | list_del_init(entry: &root->leak_list); |
1861 | spin_unlock(lock: &root->fs_info->fs_roots_radix_lock); |
1862 | #endif |
1863 | kfree(objp: root); |
1864 | } |
1865 | } |
1866 | |
1867 | void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) |
1868 | { |
1869 | int ret; |
1870 | struct btrfs_root *gang[8]; |
1871 | int i; |
1872 | |
1873 | while (!list_empty(head: &fs_info->dead_roots)) { |
1874 | gang[0] = list_entry(fs_info->dead_roots.next, |
1875 | struct btrfs_root, root_list); |
1876 | list_del(entry: &gang[0]->root_list); |
1877 | |
1878 | if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) |
1879 | btrfs_drop_and_free_fs_root(fs_info, root: gang[0]); |
1880 | btrfs_put_root(root: gang[0]); |
1881 | } |
1882 | |
1883 | while (1) { |
1884 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
1885 | results: (void **)gang, first_index: 0, |
1886 | ARRAY_SIZE(gang)); |
1887 | if (!ret) |
1888 | break; |
1889 | for (i = 0; i < ret; i++) |
1890 | btrfs_drop_and_free_fs_root(fs_info, root: gang[i]); |
1891 | } |
1892 | } |
1893 | |
1894 | static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) |
1895 | { |
1896 | mutex_init(&fs_info->scrub_lock); |
1897 | atomic_set(v: &fs_info->scrubs_running, i: 0); |
1898 | atomic_set(v: &fs_info->scrub_pause_req, i: 0); |
1899 | atomic_set(v: &fs_info->scrubs_paused, i: 0); |
1900 | atomic_set(v: &fs_info->scrub_cancel_req, i: 0); |
1901 | init_waitqueue_head(&fs_info->scrub_pause_wait); |
1902 | refcount_set(r: &fs_info->scrub_workers_refcnt, n: 0); |
1903 | } |
1904 | |
1905 | static void btrfs_init_balance(struct btrfs_fs_info *fs_info) |
1906 | { |
1907 | spin_lock_init(&fs_info->balance_lock); |
1908 | mutex_init(&fs_info->balance_mutex); |
1909 | atomic_set(v: &fs_info->balance_pause_req, i: 0); |
1910 | atomic_set(v: &fs_info->balance_cancel_req, i: 0); |
1911 | fs_info->balance_ctl = NULL; |
1912 | init_waitqueue_head(&fs_info->balance_wait_q); |
1913 | atomic_set(v: &fs_info->reloc_cancel_req, i: 0); |
1914 | } |
1915 | |
1916 | static int btrfs_init_btree_inode(struct super_block *sb) |
1917 | { |
1918 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
1919 | unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID, |
1920 | root: fs_info->tree_root); |
1921 | struct inode *inode; |
1922 | |
1923 | inode = new_inode(sb); |
1924 | if (!inode) |
1925 | return -ENOMEM; |
1926 | |
1927 | inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; |
1928 | set_nlink(inode, nlink: 1); |
1929 | /* |
1930 | * we set the i_size on the btree inode to the max possible int. |
1931 | * the real end of the address space is determined by all of |
1932 | * the devices in the system |
1933 | */ |
1934 | inode->i_size = OFFSET_MAX; |
1935 | inode->i_mapping->a_ops = &btree_aops; |
1936 | mapping_set_gfp_mask(m: inode->i_mapping, GFP_NOFS); |
1937 | |
1938 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
1939 | extent_io_tree_init(fs_info, tree: &BTRFS_I(inode)->io_tree, |
1940 | owner: IO_TREE_BTREE_INODE_IO); |
1941 | extent_map_tree_init(tree: &BTRFS_I(inode)->extent_tree); |
1942 | |
1943 | BTRFS_I(inode)->root = btrfs_grab_root(root: fs_info->tree_root); |
1944 | BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID; |
1945 | BTRFS_I(inode)->location.type = 0; |
1946 | BTRFS_I(inode)->location.offset = 0; |
1947 | set_bit(nr: BTRFS_INODE_DUMMY, addr: &BTRFS_I(inode)->runtime_flags); |
1948 | __insert_inode_hash(inode, hashval: hash); |
1949 | fs_info->btree_inode = inode; |
1950 | |
1951 | return 0; |
1952 | } |
1953 | |
1954 | static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) |
1955 | { |
1956 | mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); |
1957 | init_rwsem(&fs_info->dev_replace.rwsem); |
1958 | init_waitqueue_head(&fs_info->dev_replace.replace_wait); |
1959 | } |
1960 | |
1961 | static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) |
1962 | { |
1963 | spin_lock_init(&fs_info->qgroup_lock); |
1964 | mutex_init(&fs_info->qgroup_ioctl_lock); |
1965 | fs_info->qgroup_tree = RB_ROOT; |
1966 | INIT_LIST_HEAD(list: &fs_info->dirty_qgroups); |
1967 | fs_info->qgroup_seq = 1; |
1968 | fs_info->qgroup_ulist = NULL; |
1969 | fs_info->qgroup_rescan_running = false; |
1970 | fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL; |
1971 | mutex_init(&fs_info->qgroup_rescan_lock); |
1972 | } |
1973 | |
1974 | static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) |
1975 | { |
1976 | u32 max_active = fs_info->thread_pool_size; |
1977 | unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; |
1978 | unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE; |
1979 | |
1980 | fs_info->workers = |
1981 | btrfs_alloc_workqueue(fs_info, name: "worker" , flags, limit_active: max_active, thresh: 16); |
1982 | |
1983 | fs_info->delalloc_workers = |
1984 | btrfs_alloc_workqueue(fs_info, name: "delalloc" , |
1985 | flags, limit_active: max_active, thresh: 2); |
1986 | |
1987 | fs_info->flush_workers = |
1988 | btrfs_alloc_workqueue(fs_info, name: "flush_delalloc" , |
1989 | flags, limit_active: max_active, thresh: 0); |
1990 | |
1991 | fs_info->caching_workers = |
1992 | btrfs_alloc_workqueue(fs_info, name: "cache" , flags, limit_active: max_active, thresh: 0); |
1993 | |
1994 | fs_info->fixup_workers = |
1995 | btrfs_alloc_ordered_workqueue(fs_info, name: "fixup" , flags: ordered_flags); |
1996 | |
1997 | fs_info->endio_workers = |
1998 | alloc_workqueue(fmt: "btrfs-endio" , flags, max_active); |
1999 | fs_info->endio_meta_workers = |
2000 | alloc_workqueue(fmt: "btrfs-endio-meta" , flags, max_active); |
2001 | fs_info->rmw_workers = alloc_workqueue(fmt: "btrfs-rmw" , flags, max_active); |
2002 | fs_info->endio_write_workers = |
2003 | btrfs_alloc_workqueue(fs_info, name: "endio-write" , flags, |
2004 | limit_active: max_active, thresh: 2); |
2005 | fs_info->compressed_write_workers = |
2006 | alloc_workqueue(fmt: "btrfs-compressed-write" , flags, max_active); |
2007 | fs_info->endio_freespace_worker = |
2008 | btrfs_alloc_workqueue(fs_info, name: "freespace-write" , flags, |
2009 | limit_active: max_active, thresh: 0); |
2010 | fs_info->delayed_workers = |
2011 | btrfs_alloc_workqueue(fs_info, name: "delayed-meta" , flags, |
2012 | limit_active: max_active, thresh: 0); |
2013 | fs_info->qgroup_rescan_workers = |
2014 | btrfs_alloc_ordered_workqueue(fs_info, name: "qgroup-rescan" , |
2015 | flags: ordered_flags); |
2016 | fs_info->discard_ctl.discard_workers = |
2017 | alloc_ordered_workqueue("btrfs_discard" , WQ_FREEZABLE); |
2018 | |
2019 | if (!(fs_info->workers && |
2020 | fs_info->delalloc_workers && fs_info->flush_workers && |
2021 | fs_info->endio_workers && fs_info->endio_meta_workers && |
2022 | fs_info->compressed_write_workers && |
2023 | fs_info->endio_write_workers && |
2024 | fs_info->endio_freespace_worker && fs_info->rmw_workers && |
2025 | fs_info->caching_workers && fs_info->fixup_workers && |
2026 | fs_info->delayed_workers && fs_info->qgroup_rescan_workers && |
2027 | fs_info->discard_ctl.discard_workers)) { |
2028 | return -ENOMEM; |
2029 | } |
2030 | |
2031 | return 0; |
2032 | } |
2033 | |
2034 | static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) |
2035 | { |
2036 | struct crypto_shash *csum_shash; |
2037 | const char *csum_driver = btrfs_super_csum_driver(csum_type); |
2038 | |
2039 | csum_shash = crypto_alloc_shash(alg_name: csum_driver, type: 0, mask: 0); |
2040 | |
2041 | if (IS_ERR(ptr: csum_shash)) { |
2042 | btrfs_err(fs_info, "error allocating %s hash for checksum" , |
2043 | csum_driver); |
2044 | return PTR_ERR(ptr: csum_shash); |
2045 | } |
2046 | |
2047 | fs_info->csum_shash = csum_shash; |
2048 | |
2049 | /* |
2050 | * Check if the checksum implementation is a fast accelerated one. |
2051 | * As-is this is a bit of a hack and should be replaced once the csum |
2052 | * implementations provide that information themselves. |
2053 | */ |
2054 | switch (csum_type) { |
2055 | case BTRFS_CSUM_TYPE_CRC32: |
2056 | if (!strstr(crypto_shash_driver_name(tfm: csum_shash), "generic" )) |
2057 | set_bit(nr: BTRFS_FS_CSUM_IMPL_FAST, addr: &fs_info->flags); |
2058 | break; |
2059 | case BTRFS_CSUM_TYPE_XXHASH: |
2060 | set_bit(nr: BTRFS_FS_CSUM_IMPL_FAST, addr: &fs_info->flags); |
2061 | break; |
2062 | default: |
2063 | break; |
2064 | } |
2065 | |
2066 | btrfs_info(fs_info, "using %s (%s) checksum algorithm" , |
2067 | btrfs_super_csum_name(csum_type), |
2068 | crypto_shash_driver_name(csum_shash)); |
2069 | return 0; |
2070 | } |
2071 | |
2072 | static int btrfs_replay_log(struct btrfs_fs_info *fs_info, |
2073 | struct btrfs_fs_devices *fs_devices) |
2074 | { |
2075 | int ret; |
2076 | struct btrfs_tree_parent_check check = { 0 }; |
2077 | struct btrfs_root *log_tree_root; |
2078 | struct btrfs_super_block *disk_super = fs_info->super_copy; |
2079 | u64 bytenr = btrfs_super_log_root(s: disk_super); |
2080 | int level = btrfs_super_log_root_level(s: disk_super); |
2081 | |
2082 | if (fs_devices->rw_devices == 0) { |
2083 | btrfs_warn(fs_info, "log replay required on RO media" ); |
2084 | return -EIO; |
2085 | } |
2086 | |
2087 | log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, |
2088 | GFP_KERNEL); |
2089 | if (!log_tree_root) |
2090 | return -ENOMEM; |
2091 | |
2092 | check.level = level; |
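/*
 * The log tree was written by the transaction that never fully
 * committed, so its blocks are expected to carry a generation one
 * above the last committed generation.
 */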
2093 | check.transid = fs_info->generation + 1; |
2094 | check.owner_root = BTRFS_TREE_LOG_OBJECTID; |
2095 | log_tree_root->node = read_tree_block(fs_info, bytenr, check: &check); |
2096 | if (IS_ERR(ptr: log_tree_root->node)) { |
2097 | btrfs_warn(fs_info, "failed to read log tree" ); |
2098 | ret = PTR_ERR(ptr: log_tree_root->node); |
2099 | log_tree_root->node = NULL; |
2100 | btrfs_put_root(root: log_tree_root); |
2101 | return ret; |
2102 | } |
2103 | if (!extent_buffer_uptodate(eb: log_tree_root->node)) { |
2104 | btrfs_err(fs_info, "failed to read log tree" ); |
2105 | btrfs_put_root(root: log_tree_root); |
2106 | return -EIO; |
2107 | } |
2108 | |
2109 | /* returns with log_tree_root freed on success */ |
2110 | ret = btrfs_recover_log_trees(tree_root: log_tree_root); |
2111 | if (ret) { |
2112 | btrfs_handle_fs_error(fs_info, ret, |
2113 | "Failed to recover log tree" ); |
2114 | btrfs_put_root(root: log_tree_root); |
2115 | return ret; |
2116 | } |
2117 | |
2118 | if (sb_rdonly(sb: fs_info->sb)) { |
2119 | ret = btrfs_commit_super(fs_info); |
2120 | if (ret) |
2121 | return ret; |
2122 | } |
2123 | |
2124 | return 0; |
2125 | } |
2126 | |
2127 | static int load_global_roots_objectid(struct btrfs_root *tree_root, |
2128 | struct btrfs_path *path, u64 objectid, |
2129 | const char *name) |
2130 | { |
2131 | struct btrfs_fs_info *fs_info = tree_root->fs_info; |
2132 | struct btrfs_root *root; |
2133 | u64 max_global_id = 0; |
2134 | int ret; |
2135 | struct btrfs_key key = { |
2136 | .objectid = objectid, |
2137 | .type = BTRFS_ROOT_ITEM_KEY, |
2138 | .offset = 0, |
2139 | }; |
2140 | bool found = false; |
2141 | |
2142 | /* If we have IGNOREDATACSUMS skip loading these roots. */ |
2143 | if (objectid == BTRFS_CSUM_TREE_OBJECTID && |
2144 | btrfs_test_opt(fs_info, IGNOREDATACSUMS)) { |
2145 | set_bit(nr: BTRFS_FS_STATE_NO_CSUMS, addr: &fs_info->fs_state); |
2146 | return 0; |
2147 | } |
2148 | |
2149 | while (1) { |
2150 | ret = btrfs_search_slot(NULL, root: tree_root, key: &key, p: path, ins_len: 0, cow: 0); |
2151 | if (ret < 0) |
2152 | break; |
2153 | |
2154 | if (path->slots[0] >= btrfs_header_nritems(eb: path->nodes[0])) { |
2155 | ret = btrfs_next_leaf(root: tree_root, path); |
2156 | if (ret) { |
2157 | if (ret > 0) |
2158 | ret = 0; |
2159 | break; |
2160 | } |
2161 | } |
2162 | ret = 0; |
2163 | |
2164 | btrfs_item_key_to_cpu(eb: path->nodes[0], cpu_key: &key, nr: path->slots[0]); |
2165 | if (key.objectid != objectid) |
2166 | break; |
2167 | btrfs_release_path(p: path); |
2168 | |
2169 | /* |
2170 | * Just worry about this for extent tree, it'll be the same for |
2171 | * everybody. |
2172 | */ |
2173 | if (objectid == BTRFS_EXTENT_TREE_OBJECTID) |
2174 | max_global_id = max(max_global_id, key.offset); |
2175 | |
2176 | found = true; |
2177 | root = read_tree_root_path(tree_root, path, key: &key); |
2178 | if (IS_ERR(ptr: root)) { |
2179 | if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) |
2180 | ret = PTR_ERR(ptr: root); |
2181 | break; |
2182 | } |
2183 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
2184 | ret = btrfs_global_root_insert(root); |
2185 | if (ret) { |
2186 | btrfs_put_root(root); |
2187 | break; |
2188 | } |
2189 | key.offset++; |
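/*
 * With the extent-tree-v2 feature there can be more than one
 * global root for the same objectid, distinguished by key.offset,
 * so keep looking for the next one.
 */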
2190 | } |
2191 | btrfs_release_path(p: path); |
2192 | |
2193 | if (objectid == BTRFS_EXTENT_TREE_OBJECTID) |
2194 | fs_info->nr_global_roots = max_global_id + 1; |
2195 | |
2196 | if (!found || ret) { |
2197 | if (objectid == BTRFS_CSUM_TREE_OBJECTID) |
2198 | set_bit(nr: BTRFS_FS_STATE_NO_CSUMS, addr: &fs_info->fs_state); |
2199 | |
2200 | if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) |
2201 | ret = ret ? ret : -ENOENT; |
2202 | else |
2203 | ret = 0; |
2204 | btrfs_err(fs_info, "failed to load root %s" , name); |
2205 | } |
2206 | return ret; |
2207 | } |
2208 | |
2209 | static int load_global_roots(struct btrfs_root *tree_root) |
2210 | { |
2211 | struct btrfs_path *path; |
2212 | int ret = 0; |
2213 | |
2214 | path = btrfs_alloc_path(); |
2215 | if (!path) |
2216 | return -ENOMEM; |
2217 | |
2218 | ret = load_global_roots_objectid(tree_root, path, |
2219 | BTRFS_EXTENT_TREE_OBJECTID, name: "extent" ); |
2220 | if (ret) |
2221 | goto out; |
2222 | ret = load_global_roots_objectid(tree_root, path, |
2223 | BTRFS_CSUM_TREE_OBJECTID, name: "csum" ); |
2224 | if (ret) |
2225 | goto out; |
2226 | if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE)) |
2227 | goto out; |
2228 | ret = load_global_roots_objectid(tree_root, path, |
2229 | BTRFS_FREE_SPACE_TREE_OBJECTID, |
2230 | name: "free space" ); |
2231 | out: |
2232 | btrfs_free_path(p: path); |
2233 | return ret; |
2234 | } |
2235 | |
2236 | static int btrfs_read_roots(struct btrfs_fs_info *fs_info) |
2237 | { |
2238 | struct btrfs_root *tree_root = fs_info->tree_root; |
2239 | struct btrfs_root *root; |
2240 | struct btrfs_key location; |
2241 | int ret; |
2242 | |
2243 | ASSERT(fs_info->tree_root); |
2244 | |
2245 | ret = load_global_roots(tree_root); |
2246 | if (ret) |
2247 | return ret; |
2248 | |
2249 | location.type = BTRFS_ROOT_ITEM_KEY; |
2250 | location.offset = 0; |
2251 | |
2252 | if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) { |
2253 | location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID; |
2254 | root = btrfs_read_tree_root(tree_root, key: &location); |
2255 | if (IS_ERR(ptr: root)) { |
2256 | if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { |
2257 | ret = PTR_ERR(ptr: root); |
2258 | goto out; |
2259 | } |
2260 | } else { |
2261 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
2262 | fs_info->block_group_root = root; |
2263 | } |
2264 | } |
2265 | |
2266 | location.objectid = BTRFS_DEV_TREE_OBJECTID; |
2267 | root = btrfs_read_tree_root(tree_root, key: &location); |
2268 | if (IS_ERR(ptr: root)) { |
2269 | if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { |
2270 | ret = PTR_ERR(ptr: root); |
2271 | goto out; |
2272 | } |
2273 | } else { |
2274 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
2275 | fs_info->dev_root = root; |
2276 | } |
2277 | /* Initialize fs_info for all devices in any case */ |
2278 | ret = btrfs_init_devices_late(fs_info); |
2279 | if (ret) |
2280 | goto out; |
2281 | |
2282 | /* |
2283 | * This tree can share blocks with some other fs tree during relocation |
2284 | * and we need a proper setup by btrfs_get_fs_root |
2285 | */ |
2286 | root = btrfs_get_fs_root(fs_info: tree_root->fs_info, |
2287 | BTRFS_DATA_RELOC_TREE_OBJECTID, check_ref: true); |
2288 | if (IS_ERR(ptr: root)) { |
2289 | if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { |
2290 | ret = PTR_ERR(ptr: root); |
2291 | goto out; |
2292 | } |
2293 | } else { |
2294 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
2295 | fs_info->data_reloc_root = root; |
2296 | } |
2297 | |
2298 | location.objectid = BTRFS_QUOTA_TREE_OBJECTID; |
2299 | root = btrfs_read_tree_root(tree_root, key: &location); |
2300 | if (!IS_ERR(ptr: root)) { |
2301 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
2302 | fs_info->quota_root = root; |
2303 | } |
2304 | |
2305 | location.objectid = BTRFS_UUID_TREE_OBJECTID; |
2306 | root = btrfs_read_tree_root(tree_root, key: &location); |
2307 | if (IS_ERR(ptr: root)) { |
2308 | if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { |
2309 | ret = PTR_ERR(ptr: root); |
2310 | if (ret != -ENOENT) |
2311 | goto out; |
2312 | } |
2313 | } else { |
2314 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
2315 | fs_info->uuid_root = root; |
2316 | } |
2317 | |
2318 | if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) { |
2319 | location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID; |
2320 | root = btrfs_read_tree_root(tree_root, key: &location); |
2321 | if (IS_ERR(ptr: root)) { |
2322 | if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { |
2323 | ret = PTR_ERR(ptr: root); |
2324 | goto out; |
2325 | } |
2326 | } else { |
2327 | set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state); |
2328 | fs_info->stripe_root = root; |
2329 | } |
2330 | } |
2331 | |
2332 | return 0; |
2333 | out: |
2334 | btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d" , |
2335 | location.objectid, ret); |
2336 | return ret; |
2337 | } |
2338 | |
2339 | /* |
2340 | * Real super block validation |
2341 | * NOTE: super csum type and incompat features will not be checked here. |
2342 | * |
2343 | * @sb: super block to check |
 * @mirror_num: the super block copy number whose bytenr is checked:
2345 | * 0 the primary (1st) sb |
2346 | * 1, 2 2nd and 3rd backup copy |
2347 | * -1 skip bytenr check |
2348 | */ |
2349 | int btrfs_validate_super(struct btrfs_fs_info *fs_info, |
2350 | struct btrfs_super_block *sb, int mirror_num) |
2351 | { |
2352 | u64 nodesize = btrfs_super_nodesize(s: sb); |
2353 | u64 sectorsize = btrfs_super_sectorsize(s: sb); |
2354 | int ret = 0; |
2355 | |
2356 | if (btrfs_super_magic(s: sb) != BTRFS_MAGIC) { |
2357 | btrfs_err(fs_info, "no valid FS found" ); |
2358 | ret = -EINVAL; |
2359 | } |
2360 | if (btrfs_super_flags(s: sb) & ~BTRFS_SUPER_FLAG_SUPP) { |
2361 | btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu" , |
2362 | btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP); |
2363 | ret = -EINVAL; |
2364 | } |
2365 | if (btrfs_super_root_level(s: sb) >= BTRFS_MAX_LEVEL) { |
2366 | btrfs_err(fs_info, "tree_root level too big: %d >= %d" , |
2367 | btrfs_super_root_level(sb), BTRFS_MAX_LEVEL); |
2368 | ret = -EINVAL; |
2369 | } |
2370 | if (btrfs_super_chunk_root_level(s: sb) >= BTRFS_MAX_LEVEL) { |
2371 | btrfs_err(fs_info, "chunk_root level too big: %d >= %d" , |
2372 | btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL); |
2373 | ret = -EINVAL; |
2374 | } |
2375 | if (btrfs_super_log_root_level(s: sb) >= BTRFS_MAX_LEVEL) { |
2376 | btrfs_err(fs_info, "log_root level too big: %d >= %d" , |
2377 | btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL); |
2378 | ret = -EINVAL; |
2379 | } |
2380 | |
2381 | /* |
2382 | * Check sectorsize and nodesize first, other check will need it. |
2383 | * Check all possible sectorsize(4K, 8K, 16K, 32K, 64K) here. |
2384 | */ |
2385 | if (!is_power_of_2(n: sectorsize) || sectorsize < 4096 || |
2386 | sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) { |
2387 | btrfs_err(fs_info, "invalid sectorsize %llu" , sectorsize); |
2388 | ret = -EINVAL; |
2389 | } |
2390 | |
2391 | /* |
2392 | * We only support at most two sectorsizes: 4K and PAGE_SIZE. |
2393 | * |
2394 | * We can support 16K sectorsize with 64K page size without problem, |
2395 | * but such sectorsize/pagesize combination doesn't make much sense. |
2396 | * 4K will be our future standard, PAGE_SIZE is supported from the very |
2397 | * beginning. |
2398 | */ |
2399 | if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) { |
2400 | btrfs_err(fs_info, |
2401 | "sectorsize %llu not yet supported for page size %lu" , |
2402 | sectorsize, PAGE_SIZE); |
2403 | ret = -EINVAL; |
2404 | } |
2405 | |
2406 | if (!is_power_of_2(n: nodesize) || nodesize < sectorsize || |
2407 | nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) { |
2408 | btrfs_err(fs_info, "invalid nodesize %llu" , nodesize); |
2409 | ret = -EINVAL; |
2410 | } |
2411 | if (nodesize != le32_to_cpu(sb->__unused_leafsize)) { |
2412 | btrfs_err(fs_info, "invalid leafsize %u, should be %llu" , |
2413 | le32_to_cpu(sb->__unused_leafsize), nodesize); |
2414 | ret = -EINVAL; |
2415 | } |
2416 | |
2417 | /* Root alignment check */ |
2418 | if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { |
2419 | btrfs_warn(fs_info, "tree_root block unaligned: %llu" , |
2420 | btrfs_super_root(sb)); |
2421 | ret = -EINVAL; |
2422 | } |
2423 | if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { |
2424 | btrfs_warn(fs_info, "chunk_root block unaligned: %llu" , |
2425 | btrfs_super_chunk_root(sb)); |
2426 | ret = -EINVAL; |
2427 | } |
2428 | if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { |
2429 | btrfs_warn(fs_info, "log_root block unaligned: %llu" , |
2430 | btrfs_super_log_root(sb)); |
2431 | ret = -EINVAL; |
2432 | } |
2433 | |
2434 | if (!fs_info->fs_devices->temp_fsid && |
2435 | memcmp(p: fs_info->fs_devices->fsid, q: sb->fsid, BTRFS_FSID_SIZE) != 0) { |
2436 | btrfs_err(fs_info, |
2437 | "superblock fsid doesn't match fsid of fs_devices: %pU != %pU" , |
2438 | sb->fsid, fs_info->fs_devices->fsid); |
2439 | ret = -EINVAL; |
2440 | } |
2441 | |
2442 | if (memcmp(p: fs_info->fs_devices->metadata_uuid, q: btrfs_sb_fsid_ptr(sb), |
2443 | BTRFS_FSID_SIZE) != 0) { |
2444 | btrfs_err(fs_info, |
2445 | "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU" , |
2446 | btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid); |
2447 | ret = -EINVAL; |
2448 | } |
2449 | |
2450 | if (memcmp(p: fs_info->fs_devices->metadata_uuid, q: sb->dev_item.fsid, |
2451 | BTRFS_FSID_SIZE) != 0) { |
2452 | btrfs_err(fs_info, |
2453 | "dev_item UUID does not match metadata fsid: %pU != %pU" , |
2454 | fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid); |
2455 | ret = -EINVAL; |
2456 | } |
2457 | |
2458 | /* |
2459 | * Artificial requirement for block-group-tree to force newer features |
2460 | * (free-space-tree, no-holes) so the test matrix is smaller. |
2461 | */ |
2462 | if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) && |
2463 | (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) || |
2464 | !btrfs_fs_incompat(fs_info, NO_HOLES))) { |
2465 | btrfs_err(fs_info, |
2466 | "block-group-tree feature requires fres-space-tree and no-holes" ); |
2467 | ret = -EINVAL; |
2468 | } |
2469 | |
2470 | /* |
2471 | * Hint to catch really bogus numbers, bitflips or so, more exact checks are |
2472 | * done later |
2473 | */ |
2474 | if (btrfs_super_bytes_used(s: sb) < 6 * btrfs_super_nodesize(s: sb)) { |
2475 | btrfs_err(fs_info, "bytes_used is too small %llu" , |
2476 | btrfs_super_bytes_used(sb)); |
2477 | ret = -EINVAL; |
2478 | } |
2479 | if (!is_power_of_2(n: btrfs_super_stripesize(s: sb))) { |
2480 | btrfs_err(fs_info, "invalid stripesize %u" , |
2481 | btrfs_super_stripesize(sb)); |
2482 | ret = -EINVAL; |
2483 | } |
2484 | if (btrfs_super_num_devices(s: sb) > (1UL << 31)) |
2485 | btrfs_warn(fs_info, "suspicious number of devices: %llu" , |
2486 | btrfs_super_num_devices(sb)); |
2487 | if (btrfs_super_num_devices(s: sb) == 0) { |
2488 | btrfs_err(fs_info, "number of devices is 0" ); |
2489 | ret = -EINVAL; |
2490 | } |
2491 | |
2492 | if (mirror_num >= 0 && |
2493 | btrfs_super_bytenr(s: sb) != btrfs_sb_offset(mirror: mirror_num)) { |
2494 | btrfs_err(fs_info, "super offset mismatch %llu != %u" , |
2495 | btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET); |
2496 | ret = -EINVAL; |
2497 | } |
2498 | |
2499 | /* |
2500 | * Obvious sys_chunk_array corruptions, it must hold at least one key |
2501 | * and one chunk |
2502 | */ |
2503 | if (btrfs_super_sys_array_size(s: sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { |
2504 | btrfs_err(fs_info, "system chunk array too big %u > %u" , |
2505 | btrfs_super_sys_array_size(sb), |
2506 | BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); |
2507 | ret = -EINVAL; |
2508 | } |
2509 | if (btrfs_super_sys_array_size(s: sb) < sizeof(struct btrfs_disk_key) |
2510 | + sizeof(struct btrfs_chunk)) { |
2511 | btrfs_err(fs_info, "system chunk array too small %u < %zu" , |
2512 | btrfs_super_sys_array_size(sb), |
2513 | sizeof(struct btrfs_disk_key) |
2514 | + sizeof(struct btrfs_chunk)); |
2515 | ret = -EINVAL; |
2516 | } |
2517 | |
2518 | /* |
2519 | * The generation is a global counter, we'll trust it more than the others |
2520 | * but it's still possible that it's the one that's wrong. |
2521 | */ |
2522 | if (btrfs_super_generation(s: sb) < btrfs_super_chunk_root_generation(s: sb)) |
2523 | btrfs_warn(fs_info, |
2524 | "suspicious: generation < chunk_root_generation: %llu < %llu" , |
2525 | btrfs_super_generation(sb), |
2526 | btrfs_super_chunk_root_generation(sb)); |
2527 | if (btrfs_super_generation(s: sb) < btrfs_super_cache_generation(s: sb) |
2528 | && btrfs_super_cache_generation(s: sb) != (u64)-1) |
2529 | btrfs_warn(fs_info, |
2530 | "suspicious: generation < cache_generation: %llu < %llu" , |
2531 | btrfs_super_generation(sb), |
2532 | btrfs_super_cache_generation(sb)); |
2533 | |
2534 | return ret; |
2535 | } |
2536 | |
2537 | /* |
2538 | * Validation of super block at mount time. |
2539 | * Some checks already done early at mount time, like csum type and incompat |
2540 | * flags will be skipped. |
2541 | */ |
2542 | static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) |
2543 | { |
2544 | return btrfs_validate_super(fs_info, sb: fs_info->super_copy, mirror_num: 0); |
2545 | } |
2546 | |
2547 | /* |
2548 | * Validation of super block at write time. |
2549 | * Some checks like bytenr check will be skipped as their values will be |
2550 | * overwritten soon. |
2551 | * Extra checks like csum type and incompat flags will be done here. |
2552 | */ |
2553 | static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, |
2554 | struct btrfs_super_block *sb) |
2555 | { |
2556 | int ret; |
2557 | |
2558 | ret = btrfs_validate_super(fs_info, sb, mirror_num: -1); |
2559 | if (ret < 0) |
2560 | goto out; |
2561 | if (!btrfs_supported_super_csum(csum_type: btrfs_super_csum_type(s: sb))) { |
2562 | ret = -EUCLEAN; |
2563 | btrfs_err(fs_info, "invalid csum type, has %u want %u" , |
2564 | btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); |
2565 | goto out; |
2566 | } |
2567 | if (btrfs_super_incompat_flags(s: sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) { |
2568 | ret = -EUCLEAN; |
2569 | btrfs_err(fs_info, |
2570 | "invalid incompat flags, has 0x%llx valid mask 0x%llx" , |
2571 | btrfs_super_incompat_flags(sb), |
2572 | (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP); |
2573 | goto out; |
2574 | } |
2575 | out: |
2576 | if (ret < 0) |
2577 | btrfs_err(fs_info, |
2578 | "super block corruption detected before writing it to disk" ); |
2579 | return ret; |
2580 | } |
2581 | |
2582 | static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level) |
2583 | { |
2584 | struct btrfs_tree_parent_check check = { |
2585 | .level = level, |
2586 | .transid = gen, |
2587 | .owner_root = root->root_key.objectid |
2588 | }; |
2589 | int ret = 0; |
2590 | |
2591 | root->node = read_tree_block(fs_info: root->fs_info, bytenr, check: &check); |
2592 | if (IS_ERR(ptr: root->node)) { |
2593 | ret = PTR_ERR(ptr: root->node); |
2594 | root->node = NULL; |
2595 | return ret; |
2596 | } |
2597 | if (!extent_buffer_uptodate(eb: root->node)) { |
2598 | free_extent_buffer(eb: root->node); |
2599 | root->node = NULL; |
2600 | return -EIO; |
2601 | } |
2602 | |
2603 | btrfs_set_root_node(item: &root->root_item, node: root->node); |
2604 | root->commit_root = btrfs_root_node(root); |
2605 | btrfs_set_root_refs(s: &root->root_item, val: 1); |
2606 | return ret; |
2607 | } |
2608 | |
2609 | static int load_important_roots(struct btrfs_fs_info *fs_info) |
2610 | { |
2611 | struct btrfs_super_block *sb = fs_info->super_copy; |
2612 | u64 gen, bytenr; |
2613 | int level, ret; |
2614 | |
2615 | bytenr = btrfs_super_root(s: sb); |
2616 | gen = btrfs_super_generation(s: sb); |
2617 | level = btrfs_super_root_level(s: sb); |
2618 | ret = load_super_root(root: fs_info->tree_root, bytenr, gen, level); |
2619 | if (ret) { |
2620 | btrfs_warn(fs_info, "couldn't read tree root" ); |
2621 | return ret; |
2622 | } |
2623 | return 0; |
2624 | } |
2625 | |
2626 | static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) |
2627 | { |
2628 | int backup_index = find_newest_super_backup(info: fs_info); |
2629 | struct btrfs_super_block *sb = fs_info->super_copy; |
2630 | struct btrfs_root *tree_root = fs_info->tree_root; |
2631 | bool handle_error = false; |
2632 | int ret = 0; |
2633 | int i; |
2634 | |
2635 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { |
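/*
 * The first pass uses the roots referenced by the primary super
 * block; later passes (only reached with the usebackuproot rescue
 * option) retry with progressively older backup roots.
 */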
2636 | if (handle_error) { |
2637 | if (!IS_ERR(ptr: tree_root->node)) |
2638 | free_extent_buffer(eb: tree_root->node); |
2639 | tree_root->node = NULL; |
2640 | |
2641 | if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) |
2642 | break; |
2643 | |
2644 | free_root_pointers(info: fs_info, free_chunk_root: 0); |
2645 | |
2646 | /* |
2647 | * Don't use the log in recovery mode, it won't be |
2648 | * valid |
2649 | */ |
2650 | btrfs_set_super_log_root(s: sb, val: 0); |
2651 | |
2652 | btrfs_warn(fs_info, "try to load backup roots slot %d" , i); |
2653 | ret = read_backup_root(fs_info, priority: i); |
2654 | backup_index = ret; |
2655 | if (ret < 0) |
2656 | return ret; |
2657 | } |
2658 | |
2659 | ret = load_important_roots(fs_info); |
2660 | if (ret) { |
2661 | handle_error = true; |
2662 | continue; |
2663 | } |
2664 | |
2665 | /* |
2666 | * No need to hold btrfs_root::objectid_mutex since the fs |
2667 | * hasn't been fully initialised and we are the only user |
2668 | */ |
2669 | ret = btrfs_init_root_free_objectid(root: tree_root); |
2670 | if (ret < 0) { |
2671 | handle_error = true; |
2672 | continue; |
2673 | } |
2674 | |
2675 | ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID); |
2676 | |
2677 | ret = btrfs_read_roots(fs_info); |
2678 | if (ret < 0) { |
2679 | handle_error = true; |
2680 | continue; |
2681 | } |
2682 | |
2683 | /* All successful */ |
2684 | fs_info->generation = btrfs_header_generation(eb: tree_root->node); |
2685 | btrfs_set_last_trans_committed(fs_info, gen: fs_info->generation); |
2686 | fs_info->last_reloc_trans = 0; |
2687 | |
2688 | /* Always begin writing backup roots after the one being used */ |
2689 | if (backup_index < 0) { |
2690 | fs_info->backup_root_index = 0; |
2691 | } else { |
2692 | fs_info->backup_root_index = backup_index + 1; |
2693 | fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS; |
2694 | } |
2695 | break; |
2696 | } |
2697 | |
2698 | return ret; |
2699 | } |
2700 | |
2701 | void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) |
2702 | { |
2703 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
2704 | INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); |
2705 | INIT_LIST_HEAD(list: &fs_info->trans_list); |
2706 | INIT_LIST_HEAD(list: &fs_info->dead_roots); |
2707 | INIT_LIST_HEAD(list: &fs_info->delayed_iputs); |
2708 | INIT_LIST_HEAD(list: &fs_info->delalloc_roots); |
2709 | INIT_LIST_HEAD(list: &fs_info->caching_block_groups); |
2710 | spin_lock_init(&fs_info->delalloc_root_lock); |
2711 | spin_lock_init(&fs_info->trans_lock); |
2712 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
2713 | spin_lock_init(&fs_info->delayed_iput_lock); |
2714 | spin_lock_init(&fs_info->defrag_inodes_lock); |
2715 | spin_lock_init(&fs_info->super_lock); |
2716 | spin_lock_init(&fs_info->buffer_lock); |
2717 | spin_lock_init(&fs_info->unused_bgs_lock); |
2718 | spin_lock_init(&fs_info->treelog_bg_lock); |
2719 | spin_lock_init(&fs_info->zone_active_bgs_lock); |
2720 | spin_lock_init(&fs_info->relocation_bg_lock); |
2721 | rwlock_init(&fs_info->tree_mod_log_lock); |
2722 | rwlock_init(&fs_info->global_root_lock); |
2723 | mutex_init(&fs_info->unused_bg_unpin_mutex); |
2724 | mutex_init(&fs_info->reclaim_bgs_lock); |
2725 | mutex_init(&fs_info->reloc_mutex); |
2726 | mutex_init(&fs_info->delalloc_root_mutex); |
2727 | mutex_init(&fs_info->zoned_meta_io_lock); |
2728 | mutex_init(&fs_info->zoned_data_reloc_io_lock); |
2729 | seqlock_init(&fs_info->profiles_lock); |
2730 | |
2731 | btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers); |
2732 | btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters); |
2733 | btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered); |
2734 | btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent); |
2735 | btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep, |
2736 | BTRFS_LOCKDEP_TRANS_COMMIT_PREP); |
2737 | btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked, |
2738 | BTRFS_LOCKDEP_TRANS_UNBLOCKED); |
2739 | btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed, |
2740 | BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); |
2741 | btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed, |
2742 | BTRFS_LOCKDEP_TRANS_COMPLETED); |
2743 | |
2744 | INIT_LIST_HEAD(list: &fs_info->dirty_cowonly_roots); |
2745 | INIT_LIST_HEAD(list: &fs_info->space_info); |
2746 | INIT_LIST_HEAD(list: &fs_info->tree_mod_seq_list); |
2747 | INIT_LIST_HEAD(list: &fs_info->unused_bgs); |
2748 | INIT_LIST_HEAD(list: &fs_info->reclaim_bgs); |
2749 | INIT_LIST_HEAD(list: &fs_info->zone_active_bgs); |
2750 | #ifdef CONFIG_BTRFS_DEBUG |
2751 | INIT_LIST_HEAD(list: &fs_info->allocated_roots); |
2752 | INIT_LIST_HEAD(list: &fs_info->allocated_ebs); |
2753 | spin_lock_init(&fs_info->eb_leak_lock); |
2754 | #endif |
2755 | fs_info->mapping_tree = RB_ROOT_CACHED; |
2756 | rwlock_init(&fs_info->mapping_tree_lock); |
2757 | btrfs_init_block_rsv(rsv: &fs_info->global_block_rsv, |
2758 | type: BTRFS_BLOCK_RSV_GLOBAL); |
2759 | btrfs_init_block_rsv(rsv: &fs_info->trans_block_rsv, type: BTRFS_BLOCK_RSV_TRANS); |
2760 | btrfs_init_block_rsv(rsv: &fs_info->chunk_block_rsv, type: BTRFS_BLOCK_RSV_CHUNK); |
2761 | btrfs_init_block_rsv(rsv: &fs_info->empty_block_rsv, type: BTRFS_BLOCK_RSV_EMPTY); |
2762 | btrfs_init_block_rsv(rsv: &fs_info->delayed_block_rsv, |
2763 | type: BTRFS_BLOCK_RSV_DELOPS); |
2764 | btrfs_init_block_rsv(rsv: &fs_info->delayed_refs_rsv, |
2765 | type: BTRFS_BLOCK_RSV_DELREFS); |
2766 | |
2767 | atomic_set(v: &fs_info->async_delalloc_pages, i: 0); |
2768 | atomic_set(v: &fs_info->defrag_running, i: 0); |
2769 | atomic_set(v: &fs_info->nr_delayed_iputs, i: 0); |
2770 | atomic64_set(v: &fs_info->tree_mod_seq, i: 0); |
2771 | fs_info->global_root_tree = RB_ROOT; |
2772 | fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; |
2773 | fs_info->metadata_ratio = 0; |
2774 | fs_info->defrag_inodes = RB_ROOT; |
2775 | atomic64_set(v: &fs_info->free_chunk_space, i: 0); |
2776 | fs_info->tree_mod_log = RB_ROOT; |
2777 | fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; |
2778 | btrfs_init_ref_verify(fs_info); |
2779 | |
2780 | fs_info->thread_pool_size = min_t(unsigned long, |
2781 | num_online_cpus() + 2, 8); |
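/*
 * Default size of the generic worker pools: a couple of threads more
 * than the number of online CPUs, capped at 8. The thread_pool= mount
 * option can override this later.
 */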
2782 | |
2783 | INIT_LIST_HEAD(list: &fs_info->ordered_roots); |
2784 | spin_lock_init(&fs_info->ordered_root_lock); |
2785 | |
2786 | btrfs_init_scrub(fs_info); |
2787 | btrfs_init_balance(fs_info); |
2788 | btrfs_init_async_reclaim_work(fs_info); |
2789 | |
2790 | rwlock_init(&fs_info->block_group_cache_lock); |
2791 | fs_info->block_group_cache_tree = RB_ROOT_CACHED; |
2792 | |
2793 | extent_io_tree_init(fs_info, tree: &fs_info->excluded_extents, |
2794 | owner: IO_TREE_FS_EXCLUDED_EXTENTS); |
2795 | |
2796 | mutex_init(&fs_info->ordered_operations_mutex); |
2797 | mutex_init(&fs_info->tree_log_mutex); |
2798 | mutex_init(&fs_info->chunk_mutex); |
2799 | mutex_init(&fs_info->transaction_kthread_mutex); |
2800 | mutex_init(&fs_info->cleaner_mutex); |
2801 | mutex_init(&fs_info->ro_block_group_mutex); |
2802 | init_rwsem(&fs_info->commit_root_sem); |
2803 | init_rwsem(&fs_info->cleanup_work_sem); |
2804 | init_rwsem(&fs_info->subvol_sem); |
2805 | sema_init(sem: &fs_info->uuid_tree_rescan_sem, val: 1); |
2806 | |
2807 | btrfs_init_dev_replace_locks(fs_info); |
2808 | btrfs_init_qgroup(fs_info); |
2809 | btrfs_discard_init(fs_info); |
2810 | |
2811 | btrfs_init_free_cluster(cluster: &fs_info->meta_alloc_cluster); |
2812 | btrfs_init_free_cluster(cluster: &fs_info->data_alloc_cluster); |
2813 | |
2814 | init_waitqueue_head(&fs_info->transaction_throttle); |
2815 | init_waitqueue_head(&fs_info->transaction_wait); |
2816 | init_waitqueue_head(&fs_info->transaction_blocked_wait); |
2817 | init_waitqueue_head(&fs_info->async_submit_wait); |
2818 | init_waitqueue_head(&fs_info->delayed_iputs_wait); |
2819 | |
2820 | /* Usable values until the real ones are cached from the superblock */ |
2821 | fs_info->nodesize = 4096; |
2822 | fs_info->sectorsize = 4096; |
2823 | fs_info->sectorsize_bits = ilog2(4096); |
2824 | fs_info->stripesize = 4096; |
2825 | |
2826 | /* Default compress algorithm when user does -o compress */ |
2827 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; |
2828 | |
2829 | fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE; |
2830 | |
2831 | spin_lock_init(&fs_info->swapfile_pins_lock); |
2832 | fs_info->swapfile_pins = RB_ROOT; |
2833 | |
2834 | fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH; |
2835 | INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work); |
2836 | } |
2837 | |
2838 | static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb) |
2839 | { |
2840 | int ret; |
2841 | |
2842 | fs_info->sb = sb; |
2843 | /* Temporary fixed values for block size until we read the superblock. */ |
2844 | sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; |
2845 | sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); |
2846 | |
2847 | ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL); |
2848 | if (ret) |
2849 | return ret; |
2850 | |
2851 | ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); |
2852 | if (ret) |
2853 | return ret; |
2854 | |
2855 | fs_info->dirty_metadata_batch = PAGE_SIZE * |
2856 | (1 + ilog2(nr_cpu_ids)); |
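/*
 * Batch value for the dirty_metadata_bytes percpu counter, scaled with
 * the CPU count so per-cpu updates rarely need the shared lock.
 */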
2857 | |
2858 | ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); |
2859 | if (ret) |
2860 | return ret; |
2861 | |
2862 | ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, |
2863 | GFP_KERNEL); |
2864 | if (ret) |
2865 | return ret; |
2866 | |
2867 | fs_info->delayed_root = kmalloc(size: sizeof(struct btrfs_delayed_root), |
2868 | GFP_KERNEL); |
2869 | if (!fs_info->delayed_root) |
2870 | return -ENOMEM; |
2871 | btrfs_init_delayed_root(delayed_root: fs_info->delayed_root); |
2872 | |
2873 | if (sb_rdonly(sb)) |
2874 | set_bit(nr: BTRFS_FS_STATE_RO, addr: &fs_info->fs_state); |
2875 | |
2876 | return btrfs_alloc_stripe_hash_table(info: fs_info); |
2877 | } |
2878 | |
2879 | static int btrfs_uuid_rescan_kthread(void *data) |
2880 | { |
2881 | struct btrfs_fs_info *fs_info = data; |
2882 | int ret; |
2883 | |
2884 | /* |
2885 | * 1st step is to iterate through the existing UUID tree and |
2886 | * to delete all entries that contain outdated data. |
2887 | * 2nd step is to add all missing entries to the UUID tree. |
2888 | */ |
2889 | ret = btrfs_uuid_tree_iterate(fs_info); |
2890 | if (ret < 0) { |
2891 | if (ret != -EINTR) |
2892 | btrfs_warn(fs_info, "iterating uuid_tree failed %d" , |
2893 | ret); |
2894 | up(sem: &fs_info->uuid_tree_rescan_sem); |
2895 | return ret; |
2896 | } |
2897 | return btrfs_uuid_scan_kthread(data); |
2898 | } |
2899 | |
2900 | static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) |
2901 | { |
2902 | struct task_struct *task; |
2903 | |
2904 | down(sem: &fs_info->uuid_tree_rescan_sem); |
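/*
 * The semaphore is released by the rescan kthread when it finishes,
 * or below if the kthread fails to start.
 */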
2905 | task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid" ); |
2906 | if (IS_ERR(ptr: task)) { |
/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2908 | btrfs_warn(fs_info, "failed to start uuid_rescan task" ); |
2909 | up(sem: &fs_info->uuid_tree_rescan_sem); |
2910 | return PTR_ERR(ptr: task); |
2911 | } |
2912 | |
2913 | return 0; |
2914 | } |
2915 | |
2916 | static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) |
2917 | { |
2918 | u64 root_objectid = 0; |
2919 | struct btrfs_root *gang[8]; |
2920 | int i = 0; |
2921 | int err = 0; |
2922 | unsigned int ret = 0; |
2923 | |
2924 | while (1) { |
2925 | spin_lock(lock: &fs_info->fs_roots_radix_lock); |
2926 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
2927 | results: (void **)gang, first_index: root_objectid, |
2928 | ARRAY_SIZE(gang)); |
2929 | if (!ret) { |
2930 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
2931 | break; |
2932 | } |
2933 | root_objectid = gang[ret - 1]->root_key.objectid + 1; |
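/* Resume the next lookup right after the last root found in this batch. */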
2934 | |
2935 | for (i = 0; i < ret; i++) { |
/* Avoid grabbing roots that are on the dead_roots list. */
2937 | if (btrfs_root_refs(s: &gang[i]->root_item) == 0) { |
2938 | gang[i] = NULL; |
2939 | continue; |
2940 | } |
/* Grab all the search results for later use. */
2942 | gang[i] = btrfs_grab_root(root: gang[i]); |
2943 | } |
2944 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
2945 | |
2946 | for (i = 0; i < ret; i++) { |
2947 | if (!gang[i]) |
2948 | continue; |
2949 | root_objectid = gang[i]->root_key.objectid; |
2950 | err = btrfs_orphan_cleanup(root: gang[i]); |
2951 | if (err) |
2952 | goto out; |
2953 | btrfs_put_root(root: gang[i]); |
2954 | } |
2955 | root_objectid++; |
2956 | } |
2957 | out: |
2958 | /* Release the uncleaned roots due to error. */ |
2959 | for (; i < ret; i++) { |
2960 | if (gang[i]) |
2961 | btrfs_put_root(root: gang[i]); |
2962 | } |
2963 | return err; |
2964 | } |
2965 | |
2966 | /* |
2967 | * Mounting logic specific to read-write file systems. Shared by open_ctree |
2968 | * and btrfs_remount when remounting from read-only to read-write. |
2969 | */ |
2970 | int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info) |
2971 | { |
2972 | int ret; |
2973 | const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE); |
2974 | bool rebuild_free_space_tree = false; |
2975 | |
2976 | if (btrfs_test_opt(fs_info, CLEAR_CACHE) && |
2977 | btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
2978 | if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) |
2979 | btrfs_warn(fs_info, |
2980 | "'clear_cache' option is ignored with extent tree v2" ); |
2981 | else |
2982 | rebuild_free_space_tree = true; |
2983 | } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && |
2984 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { |
2985 | btrfs_warn(fs_info, "free space tree is invalid" ); |
2986 | rebuild_free_space_tree = true; |
2987 | } |
2988 | |
2989 | if (rebuild_free_space_tree) { |
2990 | btrfs_info(fs_info, "rebuilding free space tree" ); |
2991 | ret = btrfs_rebuild_free_space_tree(fs_info); |
2992 | if (ret) { |
2993 | btrfs_warn(fs_info, |
2994 | "failed to rebuild free space tree: %d" , ret); |
2995 | goto out; |
2996 | } |
2997 | } |
2998 | |
2999 | if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && |
3000 | !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) { |
3001 | btrfs_info(fs_info, "disabling free space tree" ); |
3002 | ret = btrfs_delete_free_space_tree(fs_info); |
3003 | if (ret) { |
3004 | btrfs_warn(fs_info, |
3005 | "failed to disable free space tree: %d" , ret); |
3006 | goto out; |
3007 | } |
3008 | } |
3009 | |
3010 | /* |
3011 | * btrfs_find_orphan_roots() is responsible for finding all the dead |
3012 | * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load |
3013 | * them into the fs_info->fs_roots_radix tree. This must be done before |
3014 | * calling btrfs_orphan_cleanup() on the tree root. If we don't do it |
3015 | * first, then btrfs_orphan_cleanup() will delete a dead root's orphan |
3016 | * item before the root's tree is deleted - this means that if we unmount |
3017 | * or crash before the deletion completes, on the next mount we will not |
 * delete what remains of the tree because the orphan item no longer
 * exists, which is what tells us we have a pending deletion.
3020 | */ |
3021 | ret = btrfs_find_orphan_roots(fs_info); |
3022 | if (ret) |
3023 | goto out; |
3024 | |
3025 | ret = btrfs_cleanup_fs_roots(fs_info); |
3026 | if (ret) |
3027 | goto out; |
3028 | |
3029 | down_read(sem: &fs_info->cleanup_work_sem); |
3030 | if ((ret = btrfs_orphan_cleanup(root: fs_info->fs_root)) || |
3031 | (ret = btrfs_orphan_cleanup(root: fs_info->tree_root))) { |
3032 | up_read(sem: &fs_info->cleanup_work_sem); |
3033 | goto out; |
3034 | } |
3035 | up_read(sem: &fs_info->cleanup_work_sem); |
3036 | |
3037 | mutex_lock(&fs_info->cleaner_mutex); |
3038 | ret = btrfs_recover_relocation(fs_info); |
3039 | mutex_unlock(lock: &fs_info->cleaner_mutex); |
3040 | if (ret < 0) { |
3041 | btrfs_warn(fs_info, "failed to recover relocation: %d" , ret); |
3042 | goto out; |
3043 | } |
3044 | |
3045 | if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && |
3046 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
3047 | btrfs_info(fs_info, "creating free space tree" ); |
3048 | ret = btrfs_create_free_space_tree(fs_info); |
3049 | if (ret) { |
3050 | btrfs_warn(fs_info, |
3051 | "failed to create free space tree: %d" , ret); |
3052 | goto out; |
3053 | } |
3054 | } |
3055 | |
3056 | if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) { |
3057 | ret = btrfs_set_free_space_cache_v1_active(fs_info, active: cache_opt); |
3058 | if (ret) |
3059 | goto out; |
3060 | } |
3061 | |
3062 | ret = btrfs_resume_balance_async(fs_info); |
3063 | if (ret) |
3064 | goto out; |
3065 | |
3066 | ret = btrfs_resume_dev_replace_async(fs_info); |
3067 | if (ret) { |
3068 | btrfs_warn(fs_info, "failed to resume dev_replace" ); |
3069 | goto out; |
3070 | } |
3071 | |
3072 | btrfs_qgroup_rescan_resume(fs_info); |
3073 | |
3074 | if (!fs_info->uuid_root) { |
3075 | btrfs_info(fs_info, "creating UUID tree" ); |
3076 | ret = btrfs_create_uuid_tree(fs_info); |
3077 | if (ret) { |
3078 | btrfs_warn(fs_info, |
3079 | "failed to create the UUID tree %d" , ret); |
3080 | goto out; |
3081 | } |
3082 | } |
3083 | |
3084 | out: |
3085 | return ret; |
3086 | } |
3087 | |
3088 | /* |
3089 | * Do various sanity and dependency checks of different features. |
3090 | * |
3091 | * @is_rw_mount: If the mount is read-write. |
3092 | * |
3093 | * This is the place for less strict checks (like for subpage or artificial |
3094 | * feature dependencies). |
3095 | * |
3096 | * For strict checks or possible corruption detection, see |
3097 | * btrfs_validate_super(). |
3098 | * |
3099 | * This should be called after btrfs_parse_options(), as some mount options |
3100 | * (space cache related) can modify on-disk format like free space tree and |
3101 | * screw up certain feature dependencies. |
3102 | */ |
3103 | int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount) |
3104 | { |
3105 | struct btrfs_super_block *disk_super = fs_info->super_copy; |
3106 | u64 incompat = btrfs_super_incompat_flags(s: disk_super); |
3107 | const u64 compat_ro = btrfs_super_compat_ro_flags(s: disk_super); |
3108 | const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP); |
3109 | |
3110 | if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) { |
3111 | btrfs_err(fs_info, |
3112 | "cannot mount because of unknown incompat features (0x%llx)" , |
3113 | incompat); |
3114 | return -EINVAL; |
3115 | } |
3116 | |
3117 | /* Runtime limitation for mixed block groups. */ |
3118 | if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && |
3119 | (fs_info->sectorsize != fs_info->nodesize)) { |
3120 | btrfs_err(fs_info, |
3121 | "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups" , |
3122 | fs_info->nodesize, fs_info->sectorsize); |
3123 | return -EINVAL; |
3124 | } |
3125 | |
3126 | /* Mixed backref is an always-enabled feature. */ |
3127 | incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
3128 | |
3129 | /* Set compression related flags just in case. */ |
3130 | if (fs_info->compress_type == BTRFS_COMPRESS_LZO) |
3131 | incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
3132 | else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) |
3133 | incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; |
3134 | |
3135 | /* |
3136 | * An ancient flag, which should really be marked deprecated. |
3137 | * Such a runtime limitation doesn't really need an incompat flag. |
3138 | */ |
3139 | if (btrfs_super_nodesize(s: disk_super) > PAGE_SIZE) |
3140 | incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
3141 | |
3142 | if (compat_ro_unsupp && is_rw_mount) { |
3143 | btrfs_err(fs_info, |
3144 | "cannot mount read-write because of unknown compat_ro features (0x%llx)" , |
3145 | compat_ro); |
3146 | return -EINVAL; |
3147 | } |
3148 | |
3149 | /* |
3150 | * If we have unsupported RO compat features, then although we are mounted |
3151 | read-only we must not cause any metadata writes, including log replay. |
3152 | * Otherwise we could screw up whatever the new feature requires. |
3153 | */ |
3154 | if (compat_ro_unsupp && btrfs_super_log_root(s: disk_super) && |
3155 | !btrfs_test_opt(fs_info, NOLOGREPLAY)) { |
3156 | btrfs_err(fs_info, |
3157 | "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay" , |
3158 | compat_ro); |
3159 | return -EINVAL; |
3160 | } |
3161 | |
3162 | /* |
3163 | * Artificial limitations for block group tree, to force |
3164 | * block-group-tree to rely on no-holes and free-space-tree. |
3165 | */ |
3166 | if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) && |
3167 | (!btrfs_fs_incompat(fs_info, NO_HOLES) || |
3168 | !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) { |
3169 | btrfs_err(fs_info, |
3170 | "block-group-tree feature requires no-holes and free-space-tree features" ); |
3171 | return -EINVAL; |
3172 | } |
3173 | |
3174 | /* |
3175 | * Subpage runtime limitation on v1 cache. |
3176 | * |
3177 | * V1 space cache still has some hard-coded PAGE_SIZE usage, and since |
3178 | we're already defaulting to the v2 cache there is no need to bother with |
3179 | v1, as it's going to be deprecated anyway. |
3180 | */ |
3181 | if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) { |
3182 | btrfs_warn(fs_info, |
3183 | "v1 space cache is not supported for page size %lu with sectorsize %u" , |
3184 | PAGE_SIZE, fs_info->sectorsize); |
3185 | return -EINVAL; |
3186 | } |
3187 | |
3188 | /* This can be called by remount, we need to protect the super block. */ |
3189 | spin_lock(lock: &fs_info->super_lock); |
3190 | btrfs_set_super_incompat_flags(s: disk_super, val: incompat); |
3191 | spin_unlock(lock: &fs_info->super_lock); |
3192 | |
3193 | return 0; |
3194 | } |
3195 | |
3196 | int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, |
3197 | char *options) |
3198 | { |
3199 | u32 sectorsize; |
3200 | u32 nodesize; |
3201 | u32 stripesize; |
3202 | u64 generation; |
3203 | u16 csum_type; |
3204 | struct btrfs_super_block *disk_super; |
3205 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
3206 | struct btrfs_root *tree_root; |
3207 | struct btrfs_root *chunk_root; |
3208 | int ret; |
3209 | int level; |
3210 | |
3211 | ret = init_mount_fs_info(fs_info, sb); |
3212 | if (ret) |
3213 | goto fail; |
3214 | |
3215 | /* These need to be init'ed before we start creating inodes and such. */ |
3216 | tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, |
3217 | GFP_KERNEL); |
3218 | fs_info->tree_root = tree_root; |
3219 | chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID, |
3220 | GFP_KERNEL); |
3221 | fs_info->chunk_root = chunk_root; |
3222 | if (!tree_root || !chunk_root) { |
3223 | ret = -ENOMEM; |
3224 | goto fail; |
3225 | } |
3226 | |
3227 | ret = btrfs_init_btree_inode(sb); |
3228 | if (ret) |
3229 | goto fail; |
3230 | |
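/* Drop any stale page cache of the device so the super block below is read fresh. */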
3231 | invalidate_bdev(bdev: fs_devices->latest_dev->bdev); |
3232 | |
3233 | /* |
3234 | * Read super block and check the signature bytes only |
3235 | */ |
3236 | disk_super = btrfs_read_dev_super(bdev: fs_devices->latest_dev->bdev); |
3237 | if (IS_ERR(ptr: disk_super)) { |
3238 | ret = PTR_ERR(ptr: disk_super); |
3239 | goto fail_alloc; |
3240 | } |
3241 | |
3242 | btrfs_info(fs_info, "first mount of filesystem %pU" , disk_super->fsid); |
3243 | /* |
3244 | * Verify the checksum type first; if that or the checksum value is |
3245 | * corrupted, we'll find out. |
3246 | */ |
3247 | csum_type = btrfs_super_csum_type(s: disk_super); |
3248 | if (!btrfs_supported_super_csum(csum_type)) { |
3249 | btrfs_err(fs_info, "unsupported checksum algorithm: %u" , |
3250 | csum_type); |
3251 | ret = -EINVAL; |
3252 | btrfs_release_disk_super(super: disk_super); |
3253 | goto fail_alloc; |
3254 | } |
3255 | |
3256 | fs_info->csum_size = btrfs_super_csum_size(s: disk_super); |
3257 | |
3258 | ret = btrfs_init_csum_hash(fs_info, csum_type); |
3259 | if (ret) { |
3260 | btrfs_release_disk_super(super: disk_super); |
3261 | goto fail_alloc; |
3262 | } |
3263 | |
3264 | /* |
3265 | * We want to check superblock checksum, the type is stored inside. |
3266 | * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). |
3267 | */ |
3268 | if (btrfs_check_super_csum(fs_info, disk_sb: disk_super)) { |
3269 | btrfs_err(fs_info, "superblock checksum mismatch" ); |
3270 | ret = -EINVAL; |
3271 | btrfs_release_disk_super(super: disk_super); |
3272 | goto fail_alloc; |
3273 | } |
3274 | |
3275 | /* |
3276 | * super_copy is zeroed at allocation time and we never touch the |
3277 | * following bytes up to INFO_SIZE; the checksum is calculated from |
3278 | * the whole block of INFO_SIZE. |
3279 | */ |
3280 | memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy)); |
3281 | btrfs_release_disk_super(super: disk_super); |
3282 | |
3283 | disk_super = fs_info->super_copy; |
3284 | |
3285 | memcpy(fs_info->super_for_commit, fs_info->super_copy, |
3286 | sizeof(*fs_info->super_for_commit)); |
3287 | |
3288 | ret = btrfs_validate_mount_super(fs_info); |
3289 | if (ret) { |
3290 | btrfs_err(fs_info, "superblock contains fatal errors" ); |
3291 | ret = -EINVAL; |
3292 | goto fail_alloc; |
3293 | } |
3294 | |
3295 | if (!btrfs_super_root(s: disk_super)) { |
3296 | btrfs_err(fs_info, "invalid superblock tree root bytenr" ); |
3297 | ret = -EINVAL; |
3298 | goto fail_alloc; |
3299 | } |
3300 | |
3301 | /* check FS state, whether FS is broken. */ |
3302 | if (btrfs_super_flags(s: disk_super) & BTRFS_SUPER_FLAG_ERROR) |
3303 | WRITE_ONCE(fs_info->fs_error, -EUCLEAN); |
3304 | |
3305 | /* Set up fs_info before parsing mount options */ |
3306 | nodesize = btrfs_super_nodesize(s: disk_super); |
3307 | sectorsize = btrfs_super_sectorsize(s: disk_super); |
3308 | stripesize = sectorsize; |
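/* Per-cpu counter batches, scaled with the number of CPUs. */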
3309 | fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
3310 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
3311 | |
3312 | fs_info->nodesize = nodesize; |
3313 | fs_info->sectorsize = sectorsize; |
3314 | fs_info->sectorsize_bits = ilog2(sectorsize); |
3315 | fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(info: fs_info) / fs_info->csum_size; |
3316 | fs_info->stripesize = stripesize; |
3317 | |
3318 | /* |
3319 | * Handle the space caching options appropriately now that we have the |
3320 | * super block loaded and validated. |
3321 | */ |
3322 | btrfs_set_free_space_cache_settings(fs_info); |
3323 | |
3324 | if (!btrfs_check_options(info: fs_info, mount_opt: &fs_info->mount_opt, flags: sb->s_flags)) { |
3325 | ret = -EINVAL; |
3326 | goto fail_alloc; |
3327 | } |
3328 | |
3329 | ret = btrfs_check_features(fs_info, is_rw_mount: !sb_rdonly(sb)); |
3330 | if (ret < 0) |
3331 | goto fail_alloc; |
3332 | |
3333 | /* |
3334 | * At this point our mount options are validated. If we set ->max_inline |
3335 | * to something non-standard, make sure we truncate it to sectorsize. |
3336 | */ |
3337 | fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize); |
3338 | |
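/* Subpage (sectorsize < PAGE_SIZE) support needs extra per-page tracking info. */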
3339 | if (sectorsize < PAGE_SIZE) { |
3340 | struct btrfs_subpage_info *subpage_info; |
3341 | |
3342 | btrfs_warn(fs_info, |
3343 | "read-write for sector size %u with page size %lu is experimental" , |
3344 | sectorsize, PAGE_SIZE); |
3345 | subpage_info = kzalloc(size: sizeof(*subpage_info), GFP_KERNEL); |
3346 | if (!subpage_info) { |
3347 | ret = -ENOMEM; |
3348 | goto fail_alloc; |
3349 | } |
3350 | btrfs_init_subpage_info(subpage_info, sectorsize); |
3351 | fs_info->subpage_info = subpage_info; |
3352 | } |
3353 | |
3354 | ret = btrfs_init_workqueues(fs_info); |
3355 | if (ret) |
3356 | goto fail_sb_buffer; |
3357 | |
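/* Scale readahead with the number of devices, but keep at least 4MiB. */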
3358 | sb->s_bdi->ra_pages *= btrfs_super_num_devices(s: disk_super); |
3359 | sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); |
3360 | |
3361 | /* Update the values for the current filesystem. */ |
3362 | sb->s_blocksize = sectorsize; |
3363 | sb->s_blocksize_bits = blksize_bits(size: sectorsize); |
3364 | memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); |
3365 | |
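/* Read the chunk array embedded in the super block; it holds the SYSTEM chunks needed to bootstrap reading the chunk tree. */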
3366 | mutex_lock(&fs_info->chunk_mutex); |
3367 | ret = btrfs_read_sys_array(fs_info); |
3368 | mutex_unlock(lock: &fs_info->chunk_mutex); |
3369 | if (ret) { |
3370 | btrfs_err(fs_info, "failed to read the system array: %d" , ret); |
3371 | goto fail_sb_buffer; |
3372 | } |
3373 | |
3374 | generation = btrfs_super_chunk_root_generation(s: disk_super); |
3375 | level = btrfs_super_chunk_root_level(s: disk_super); |
3376 | ret = load_super_root(root: chunk_root, bytenr: btrfs_super_chunk_root(s: disk_super), |
3377 | gen: generation, level); |
3378 | if (ret) { |
3379 | btrfs_err(fs_info, "failed to read chunk root" ); |
3380 | goto fail_tree_roots; |
3381 | } |
3382 | |
3383 | read_extent_buffer(eb: chunk_root->node, dst: fs_info->chunk_tree_uuid, |
3384 | offsetof(struct btrfs_header, chunk_tree_uuid), |
3385 | BTRFS_UUID_SIZE); |
3386 | |
3387 | ret = btrfs_read_chunk_tree(fs_info); |
3388 | if (ret) { |
3389 | btrfs_err(fs_info, "failed to read chunk tree: %d" , ret); |
3390 | goto fail_tree_roots; |
3391 | } |
3392 | |
3393 | /* |
3394 | * At this point we know all the devices that make this filesystem, |
3395 | * including the seed devices but we don't know yet if the replace |
3396 | * target is required. So free devices that are not part of this |
3397 | * filesystem but skip the replace target device which is checked |
3398 | * below in btrfs_init_dev_replace(). |
3399 | */ |
3400 | btrfs_free_extra_devids(fs_devices); |
3401 | if (!fs_devices->latest_dev->bdev) { |
3402 | btrfs_err(fs_info, "failed to read devices" ); |
3403 | ret = -EIO; |
3404 | goto fail_tree_roots; |
3405 | } |
3406 | |
3407 | ret = init_tree_roots(fs_info); |
3408 | if (ret) |
3409 | goto fail_tree_roots; |
3410 | |
3411 | /* |
3412 | * Get zone type information of zoned block devices. This will also |
3413 | * handle emulation of a zoned filesystem if a regular device has the |
3414 | * zoned incompat feature flag set. |
3415 | */ |
3416 | ret = btrfs_get_dev_zone_info_all_devices(fs_info); |
3417 | if (ret) { |
3418 | btrfs_err(fs_info, |
3419 | "zoned: failed to read device zone info: %d" , ret); |
3420 | goto fail_block_groups; |
3421 | } |
3422 | |
3423 | /* |
3424 | * If we have a uuid root and we're not being told to rescan we need to |
3425 | * check the generation here so we can set the |
3426 | * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the |
3427 | * transaction during a balance or the log replay without updating the |
3428 | * uuid generation, and then if we crash we would rescan the uuid tree, |
3429 | * even though it was perfectly fine. |
3430 | */ |
3431 | if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) && |
3432 | fs_info->generation == btrfs_super_uuid_tree_generation(s: disk_super)) |
3433 | set_bit(nr: BTRFS_FS_UPDATE_UUID_TREE_GEN, addr: &fs_info->flags); |
3434 | |
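/* Cross-check that every device extent has a matching chunk stripe and vice versa. */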
3435 | ret = btrfs_verify_dev_extents(fs_info); |
3436 | if (ret) { |
3437 | btrfs_err(fs_info, |
3438 | "failed to verify dev extents against chunks: %d" , |
3439 | ret); |
3440 | goto fail_block_groups; |
3441 | } |
3442 | ret = btrfs_recover_balance(fs_info); |
3443 | if (ret) { |
3444 | btrfs_err(fs_info, "failed to recover balance: %d" , ret); |
3445 | goto fail_block_groups; |
3446 | } |
3447 | |
3448 | ret = btrfs_init_dev_stats(fs_info); |
3449 | if (ret) { |
3450 | btrfs_err(fs_info, "failed to init dev_stats: %d" , ret); |
3451 | goto fail_block_groups; |
3452 | } |
3453 | |
3454 | ret = btrfs_init_dev_replace(fs_info); |
3455 | if (ret) { |
3456 | btrfs_err(fs_info, "failed to init dev_replace: %d" , ret); |
3457 | goto fail_block_groups; |
3458 | } |
3459 | |
3460 | ret = btrfs_check_zoned_mode(fs_info); |
3461 | if (ret) { |
3462 | btrfs_err(fs_info, "failed to initialize zoned mode: %d" , |
3463 | ret); |
3464 | goto fail_block_groups; |
3465 | } |
3466 | |
3467 | ret = btrfs_sysfs_add_fsid(fs_devs: fs_devices); |
3468 | if (ret) { |
3469 | btrfs_err(fs_info, "failed to init sysfs fsid interface: %d" , |
3470 | ret); |
3471 | goto fail_block_groups; |
3472 | } |
3473 | |
3474 | ret = btrfs_sysfs_add_mounted(fs_info); |
3475 | if (ret) { |
3476 | btrfs_err(fs_info, "failed to init sysfs interface: %d" , ret); |
3477 | goto fail_fsdev_sysfs; |
3478 | } |
3479 | |
3480 | ret = btrfs_init_space_info(fs_info); |
3481 | if (ret) { |
3482 | btrfs_err(fs_info, "failed to initialize space info: %d" , ret); |
3483 | goto fail_sysfs; |
3484 | } |
3485 | |
3486 | ret = btrfs_read_block_groups(info: fs_info); |
3487 | if (ret) { |
3488 | btrfs_err(fs_info, "failed to read block groups: %d" , ret); |
3489 | goto fail_sysfs; |
3490 | } |
3491 | |
3492 | btrfs_free_zone_cache(fs_info); |
3493 | |
3494 | btrfs_check_active_zone_reservation(fs_info); |
3495 | |
3496 | if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices && |
3497 | !btrfs_check_rw_degradable(fs_info, NULL)) { |
3498 | btrfs_warn(fs_info, |
3499 | "writable mount is not allowed due to too many missing devices" ); |
3500 | ret = -EINVAL; |
3501 | goto fail_sysfs; |
3502 | } |
3503 | |
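/* Start the background kthreads: the cleaner and the transaction committer. */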
3504 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info, |
3505 | "btrfs-cleaner" ); |
3506 | if (IS_ERR(ptr: fs_info->cleaner_kthread)) { |
3507 | ret = PTR_ERR(ptr: fs_info->cleaner_kthread); |
3508 | goto fail_sysfs; |
3509 | } |
3510 | |
3511 | fs_info->transaction_kthread = kthread_run(transaction_kthread, |
3512 | tree_root, |
3513 | "btrfs-transaction" ); |
3514 | if (IS_ERR(ptr: fs_info->transaction_kthread)) { |
3515 | ret = PTR_ERR(ptr: fs_info->transaction_kthread); |
3516 | goto fail_cleaner; |
3517 | } |
3518 | |
3519 | ret = btrfs_read_qgroup_config(fs_info); |
3520 | if (ret) |
3521 | goto fail_trans_kthread; |
3522 | |
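/* btrfs_build_ref_tree() only does real work with the ref-verify debugging feature enabled. */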
3523 | if (btrfs_build_ref_tree(fs_info)) |
3524 | btrfs_err(fs_info, "couldn't build ref tree" ); |
3525 | |
3526 | /* Do not make disk changes in a broken FS or when nologreplay is given. */ |
3527 | if (btrfs_super_log_root(s: disk_super) != 0 && |
3528 | !btrfs_test_opt(fs_info, NOLOGREPLAY)) { |
3529 | btrfs_info(fs_info, "start tree-log replay" ); |
3530 | ret = btrfs_replay_log(fs_info, fs_devices); |
3531 | if (ret) |
3532 | goto fail_qgroup; |
3533 | } |
3534 | |
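/* Load the top-level subvolume root (the fs tree). */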
3535 | fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, check_ref: true); |
3536 | if (IS_ERR(ptr: fs_info->fs_root)) { |
3537 | ret = PTR_ERR(ptr: fs_info->fs_root); |
3538 | btrfs_warn(fs_info, "failed to read fs tree: %d" , ret); |
3539 | fs_info->fs_root = NULL; |
3540 | goto fail_qgroup; |
3541 | } |
3542 | |
3543 | if (sb_rdonly(sb)) |
3544 | return 0; |
3545 | |
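/* For read-write mounts, finish the startup work that may modify the filesystem. */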
3546 | ret = btrfs_start_pre_rw_mount(fs_info); |
3547 | if (ret) { |
3548 | close_ctree(fs_info); |
3549 | return ret; |
3550 | } |
3551 | btrfs_discard_resume(fs_info); |
3552 | |
3553 | if (fs_info->uuid_root && |
3554 | (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || |
3555 | fs_info->generation != btrfs_super_uuid_tree_generation(s: disk_super))) { |
3556 | btrfs_info(fs_info, "checking UUID tree" ); |
3557 | ret = btrfs_check_uuid_tree(fs_info); |
3558 | if (ret) { |
3559 | btrfs_warn(fs_info, |
3560 | "failed to check the UUID tree: %d" , ret); |
3561 | close_ctree(fs_info); |
3562 | return ret; |
3563 | } |
3564 | } |
3565 | |
3566 | set_bit(nr: BTRFS_FS_OPEN, addr: &fs_info->flags); |
3567 | |
3568 | /* Kick the cleaner thread so it'll start deleting snapshots. */ |
3569 | if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags)) |
3570 | wake_up_process(tsk: fs_info->cleaner_kthread); |
3571 | |
3572 | return 0; |
3573 | |
3574 | fail_qgroup: |
3575 | btrfs_free_qgroup_config(fs_info); |
3576 | fail_trans_kthread: |
3577 | kthread_stop(k: fs_info->transaction_kthread); |
3578 | btrfs_cleanup_transaction(fs_info); |
3579 | btrfs_free_fs_roots(fs_info); |
3580 | fail_cleaner: |
3581 | kthread_stop(k: fs_info->cleaner_kthread); |
3582 | |
3583 | /* |
3584 | * make sure we're done with the btree inode before we stop our |
3585 | * kthreads |
3586 | */ |
3587 | filemap_write_and_wait(mapping: fs_info->btree_inode->i_mapping); |
3588 | |
3589 | fail_sysfs: |
3590 | btrfs_sysfs_remove_mounted(fs_info); |
3591 | |
3592 | fail_fsdev_sysfs: |
3593 | btrfs_sysfs_remove_fsid(fs_devs: fs_info->fs_devices); |
3594 | |
3595 | fail_block_groups: |
3596 | btrfs_put_block_group_cache(info: fs_info); |
3597 | |
3598 | fail_tree_roots: |
3599 | if (fs_info->data_reloc_root) |
3600 | btrfs_drop_and_free_fs_root(fs_info, root: fs_info->data_reloc_root); |
3601 | free_root_pointers(info: fs_info, free_chunk_root: true); |
3602 | invalidate_inode_pages2(mapping: fs_info->btree_inode->i_mapping); |
3603 | |
3604 | fail_sb_buffer: |
3605 | btrfs_stop_all_workers(fs_info); |
3606 | btrfs_free_block_groups(info: fs_info); |
3607 | fail_alloc: |
3608 | btrfs_mapping_tree_free(fs_info); |
3609 | |
3610 | iput(fs_info->btree_inode); |
3611 | fail: |
3612 | btrfs_close_devices(fs_devices: fs_info->fs_devices); |
3613 | ASSERT(ret < 0); |
3614 | return ret; |
3615 | } |
3616 | ALLOW_ERROR_INJECTION(open_ctree, ERRNO); |
3617 | |
3618 | static void btrfs_end_super_write(struct bio *bio) |
3619 | { |
3620 | struct btrfs_device *device = bio->bi_private; |
3621 | struct bio_vec *bvec; |
3622 | struct bvec_iter_all iter_all; |
3623 | struct page *page; |
3624 | |
3625 | bio_for_each_segment_all(bvec, bio, iter_all) { |
3626 | page = bvec->bv_page; |
3627 | |
3628 | if (bio->bi_status) { |
3629 | btrfs_warn_rl_in_rcu(device->fs_info, |
3630 | "lost page write due to IO error on %s (%d)" , |
3631 | btrfs_dev_name(device), |
3632 | blk_status_to_errno(bio->bi_status)); |
3633 | ClearPageUptodate(page); |
3634 | SetPageError(page); |
3635 | btrfs_dev_stat_inc_and_print(dev: device, |
3636 | index: BTRFS_DEV_STAT_WRITE_ERRS); |
3637 | } else { |
3638 | SetPageUptodate(page); |
3639 | } |
3640 | |
3641 | put_page(page); |
3642 | unlock_page(page); |
3643 | } |
3644 | |
3645 | bio_put(bio); |
3646 | } |
3647 | |
3648 | struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, |
3649 | int copy_num, bool drop_cache) |
3650 | { |
3651 | struct btrfs_super_block *super; |
3652 | struct page *page; |
3653 | u64 bytenr, bytenr_orig; |
3654 | struct address_space *mapping = bdev->bd_inode->i_mapping; |
3655 | int ret; |
3656 | |
3657 | bytenr_orig = btrfs_sb_offset(mirror: copy_num); |
3658 | ret = btrfs_sb_log_location_bdev(bdev, mirror: copy_num, READ, bytenr_ret: &bytenr); |
3659 | if (ret == -ENOENT) |
3660 | return ERR_PTR(error: -EINVAL); |
3661 | else if (ret) |
3662 | return ERR_PTR(error: ret); |
3663 | |
3664 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev)) |
3665 | return ERR_PTR(error: -EINVAL); |
3666 | |
3667 | if (drop_cache) { |
3668 | /* This should only be called with the primary sb. */ |
3669 | ASSERT(copy_num == 0); |
3670 | |
3671 | /* |
3672 | * Drop the page of the primary superblock, so later read will |
3673 | * always read from the device. |
3674 | */ |
3675 | invalidate_inode_pages2_range(mapping, |
3676 | start: bytenr >> PAGE_SHIFT, |
3677 | end: (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT); |
3678 | } |
3679 | |
3680 | page = read_cache_page_gfp(mapping, index: bytenr >> PAGE_SHIFT, GFP_NOFS); |
3681 | if (IS_ERR(ptr: page)) |
3682 | return ERR_CAST(ptr: page); |
3683 | |
3684 | super = page_address(page); |
3685 | if (btrfs_super_magic(s: super) != BTRFS_MAGIC) { |
3686 | btrfs_release_disk_super(super); |
3687 | return ERR_PTR(error: -ENODATA); |
3688 | } |
3689 | |
3690 | if (btrfs_super_bytenr(s: super) != bytenr_orig) { |
3691 | btrfs_release_disk_super(super); |
3692 | return ERR_PTR(error: -EINVAL); |
3693 | } |
3694 | |
3695 | return super; |
3696 | } |
3697 | |
3698 | |
3699 | struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev) |
3700 | { |
3701 | struct btrfs_super_block *super, *latest = NULL; |
3702 | int i; |
3703 | u64 transid = 0; |
3704 | |
3705 | /* we would like to check all the supers, but that would make |
3706 | * a btrfs mount succeed after a mkfs from a different FS. |
3707 | * So, we need to add a special mount option to scan for |
3708 | * later supers, using BTRFS_SUPER_MIRROR_MAX instead |
3709 | */ |
3710 | for (i = 0; i < 1; i++) { |
3711 | super = btrfs_read_dev_one_super(bdev, copy_num: i, drop_cache: false); |
3712 | if (IS_ERR(ptr: super)) |
3713 | continue; |
3714 | |
3715 | if (!latest || btrfs_super_generation(s: super) > transid) { |
3716 | if (latest) |
3717 | btrfs_release_disk_super(super); |
3718 | |
3719 | latest = super; |
3720 | transid = btrfs_super_generation(s: super); |
3721 | } |
3722 | } |
3723 | |
3724 | return super; |
3725 | } |
3726 | |
3727 | /* |
3728 | * Write superblock @sb to the @device. Do not wait for completion, all the |
3729 | * pages we use for writing are locked. |
3730 | * |
3731 | * Write @max_mirrors copies of the superblock, where 0 means the default: write |
3732 | * all the copies that fit within the expected device size at commit time. Note |
3733 | * that max_mirrors must be the same for the write and wait phases. |
3734 | * |
3735 | * Return number of errors when page is not found or submission fails. |
3736 | */ |
3737 | static int write_dev_supers(struct btrfs_device *device, |
3738 | struct btrfs_super_block *sb, int max_mirrors) |
3739 | { |
3740 | struct btrfs_fs_info *fs_info = device->fs_info; |
3741 | struct address_space *mapping = device->bdev->bd_inode->i_mapping; |
3742 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
3743 | int i; |
3744 | int errors = 0; |
3745 | int ret; |
3746 | u64 bytenr, bytenr_orig; |
3747 | |
3748 | if (max_mirrors == 0) |
3749 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; |
3750 | |
3751 | shash->tfm = fs_info->csum_shash; |
3752 | |
3753 | for (i = 0; i < max_mirrors; i++) { |
3754 | struct page *page; |
3755 | struct bio *bio; |
3756 | struct btrfs_super_block *disk_super; |
3757 | |
3758 | bytenr_orig = btrfs_sb_offset(mirror: i); |
3759 | ret = btrfs_sb_log_location(device, mirror: i, WRITE, bytenr_ret: &bytenr); |
3760 | if (ret == -ENOENT) { |
3761 | continue; |
3762 | } else if (ret < 0) { |
3763 | btrfs_err(device->fs_info, |
3764 | "couldn't get super block location for mirror %d" , |
3765 | i); |
3766 | errors++; |
3767 | continue; |
3768 | } |
3769 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
3770 | device->commit_total_bytes) |
3771 | break; |
3772 | |
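/* Each copy records its own nominal offset, so the checksum differs per copy. */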
3773 | btrfs_set_super_bytenr(s: sb, val: bytenr_orig); |
3774 | |
3775 | crypto_shash_digest(desc: shash, data: (const char *)sb + BTRFS_CSUM_SIZE, |
3776 | BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, |
3777 | out: sb->csum); |
3778 | |
3779 | page = find_or_create_page(mapping, index: bytenr >> PAGE_SHIFT, |
3780 | GFP_NOFS); |
3781 | if (!page) { |
3782 | btrfs_err(device->fs_info, |
3783 | "couldn't get super block page for bytenr %llu" , |
3784 | bytenr); |
3785 | errors++; |
3786 | continue; |
3787 | } |
3788 | |
3789 | /* Bump the refcount for wait_dev_supers() */ |
3790 | get_page(page); |
3791 | |
3792 | disk_super = page_address(page); |
3793 | memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE); |
3794 | |
3795 | /* |
3796 | * Directly use bios here instead of relying on the page cache |
3797 | * to do I/O, so we don't lose the ability to do integrity |
3798 | * checking. |
3799 | */ |
3800 | bio = bio_alloc(bdev: device->bdev, nr_vecs: 1, |
3801 | opf: REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, |
3802 | GFP_NOFS); |
3803 | bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT; |
3804 | bio->bi_private = device; |
3805 | bio->bi_end_io = btrfs_end_super_write; |
3806 | __bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE, |
3807 | offset_in_page(bytenr)); |
3808 | |
3809 | /* |
3810 | * We FUA only the first super block. The others we allow to |
3811 | * go down lazily and there's a short window where the on-disk |
3812 | * copies might still contain the older version. |
3813 | */ |
3814 | if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) |
3815 | bio->bi_opf |= REQ_FUA; |
3816 | submit_bio(bio); |
3817 | |
3818 | if (btrfs_advance_sb_log(device, mirror: i)) |
3819 | errors++; |
3820 | } |
3821 | return errors < i ? 0 : -1; |
3822 | } |
3823 | |
3824 | /* |
3825 | * Wait for write completion of the superblocks submitted by write_dev_supers(); |
3826 | * @max_mirrors must be the same for the write and wait phases. |
3827 | * |
3828 | * Return number of errors when page is not found or not marked up to |
3829 | * date. |
3830 | */ |
3831 | static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) |
3832 | { |
3833 | int i; |
3834 | int errors = 0; |
3835 | bool primary_failed = false; |
3836 | int ret; |
3837 | u64 bytenr; |
3838 | |
3839 | if (max_mirrors == 0) |
3840 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; |
3841 | |
3842 | for (i = 0; i < max_mirrors; i++) { |
3843 | struct page *page; |
3844 | |
3845 | ret = btrfs_sb_log_location(device, mirror: i, READ, bytenr_ret: &bytenr); |
3846 | if (ret == -ENOENT) { |
3847 | break; |
3848 | } else if (ret < 0) { |
3849 | errors++; |
3850 | if (i == 0) |
3851 | primary_failed = true; |
3852 | continue; |
3853 | } |
3854 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
3855 | device->commit_total_bytes) |
3856 | break; |
3857 | |
3858 | page = find_get_page(mapping: device->bdev->bd_inode->i_mapping, |
3859 | offset: bytenr >> PAGE_SHIFT); |
3860 | if (!page) { |
3861 | errors++; |
3862 | if (i == 0) |
3863 | primary_failed = true; |
3864 | continue; |
3865 | } |
3866 | /* Page is submitted locked and unlocked once the IO completes */ |
3867 | wait_on_page_locked(page); |
3868 | if (PageError(page)) { |
3869 | errors++; |
3870 | if (i == 0) |
3871 | primary_failed = true; |
3872 | } |
3873 | |
3874 | /* Drop our reference */ |
3875 | put_page(page); |
3876 | |
3877 | /* Drop the reference from the writing run */ |
3878 | put_page(page); |
3879 | } |
3880 | |
3881 | /* log error, force error return */ |
3882 | if (primary_failed) { |
3883 | btrfs_err(device->fs_info, "error writing primary super block to device %llu" , |
3884 | device->devid); |
3885 | return -1; |
3886 | } |
3887 | |
3888 | return errors < i ? 0 : -1; |
3889 | } |
3890 | |
3891 | /* |
3892 | * Endio for write_dev_flush(); this will wake anyone waiting |
3893 | * for the barrier when it is done. |
3894 | */ |
3895 | static void btrfs_end_empty_barrier(struct bio *bio) |
3896 | { |
3897 | bio_uninit(bio); |
3898 | complete(bio->bi_private); |
3899 | } |
3900 | |
3901 | /* |
3902 | * Submit a flush request to the device if it supports it. Error handling is |
3903 | * done in the waiting counterpart. |
3904 | */ |
3905 | static void write_dev_flush(struct btrfs_device *device) |
3906 | { |
3907 | struct bio *bio = &device->flush_bio; |
3908 | |
3909 | device->last_flush_error = BLK_STS_OK; |
3910 | |
3911 | bio_init(bio, bdev: device->bdev, NULL, max_vecs: 0, |
3912 | opf: REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH); |
3913 | bio->bi_end_io = btrfs_end_empty_barrier; |
3914 | init_completion(x: &device->flush_wait); |
3915 | bio->bi_private = &device->flush_wait; |
3916 | submit_bio(bio); |
3917 | set_bit(BTRFS_DEV_STATE_FLUSH_SENT, addr: &device->dev_state); |
3918 | } |
3919 | |
3920 | /* |
3921 | * If the flush bio has been submitted by write_dev_flush, wait for it. |
3922 | * Return true for any error, and false otherwise. |
3923 | */ |
3924 | static bool wait_dev_flush(struct btrfs_device *device) |
3925 | { |
3926 | struct bio *bio = &device->flush_bio; |
3927 | |
3928 | if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, addr: &device->dev_state)) |
3929 | return false; |
3930 | |
3931 | wait_for_completion_io(&device->flush_wait); |
3932 | |
3933 | if (bio->bi_status) { |
3934 | device->last_flush_error = bio->bi_status; |
3935 | btrfs_dev_stat_inc_and_print(dev: device, index: BTRFS_DEV_STAT_FLUSH_ERRS); |
3936 | return true; |
3937 | } |
3938 | |
3939 | return false; |
3940 | } |
3941 | |
3942 | /* |
3943 | * send an empty flush down to each device in parallel, |
3944 | * then wait for them |
3945 | */ |
3946 | static int barrier_all_devices(struct btrfs_fs_info *info) |
3947 | { |
3948 | struct list_head *head; |
3949 | struct btrfs_device *dev; |
3950 | int errors_wait = 0; |
3951 | |
3952 | lockdep_assert_held(&info->fs_devices->device_list_mutex); |
3953 | /* send down all the barriers */ |
3954 | head = &info->fs_devices->devices; |
3955 | list_for_each_entry(dev, head, dev_list) { |
3956 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
3957 | continue; |
3958 | if (!dev->bdev) |
3959 | continue; |
3960 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
3961 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
3962 | continue; |
3963 | |
3964 | write_dev_flush(device: dev); |
3965 | } |
3966 | |
3967 | /* wait for all the barriers */ |
3968 | list_for_each_entry(dev, head, dev_list) { |
3969 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
3970 | continue; |
3971 | if (!dev->bdev) { |
3972 | errors_wait++; |
3973 | continue; |
3974 | } |
3975 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
3976 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
3977 | continue; |
3978 | |
3979 | if (wait_dev_flush(device: dev)) |
3980 | errors_wait++; |
3981 | } |
3982 | |
3983 | /* |
3984 | * Checks last_flush_error of disks in order to determine the device |
3985 | * state. |
3986 | */ |
3987 | if (errors_wait && !btrfs_check_rw_degradable(fs_info: info, NULL)) |
3988 | return -EIO; |
3989 | |
3990 | return 0; |
3991 | } |
3992 | |
3993 | int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) |
3994 | { |
3995 | int raid_type; |
3996 | int min_tolerated = INT_MAX; |
3997 | |
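/* Take the minimum tolerated-failures value over all profiles present in @flags, treating a missing or SINGLE profile as SINGLE. */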
3998 | if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || |
3999 | (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) |
4000 | min_tolerated = min_t(int, min_tolerated, |
4001 | btrfs_raid_array[BTRFS_RAID_SINGLE]. |
4002 | tolerated_failures); |
4003 | |
4004 | for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { |
4005 | if (raid_type == BTRFS_RAID_SINGLE) |
4006 | continue; |
4007 | if (!(flags & btrfs_raid_array[raid_type].bg_flag)) |
4008 | continue; |
4009 | min_tolerated = min_t(int, min_tolerated, |
4010 | btrfs_raid_array[raid_type]. |
4011 | tolerated_failures); |
4012 | } |
4013 | |
4014 | if (min_tolerated == INT_MAX) { |
4015 | pr_warn("BTRFS: unknown raid flag: %llu" , flags); |
4016 | min_tolerated = 0; |
4017 | } |
4018 | |
4019 | return min_tolerated; |
4020 | } |
4021 | |
4022 | int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) |
4023 | { |
4024 | struct list_head *head; |
4025 | struct btrfs_device *dev; |
4026 | struct btrfs_super_block *sb; |
4027 | struct btrfs_dev_item *dev_item; |
4028 | int ret; |
4029 | int do_barriers; |
4030 | int max_errors; |
4031 | int total_errors = 0; |
4032 | u64 flags; |
4033 | |
4034 | do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); |
4035 | |
4036 | /* |
4037 | * max_mirrors == 0 indicates we're from commit_transaction, |
4038 | * not from fsync where the tree roots in fs_info have not |
4039 | * been consistent on disk. |
4040 | */ |
4041 | if (max_mirrors == 0) |
4042 | backup_super_roots(info: fs_info); |
4043 | |
4044 | sb = fs_info->super_for_commit; |
4045 | dev_item = &sb->dev_item; |
4046 | |
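/* Super block write errors are tolerated as long as at least one device succeeded. */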
4047 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
4048 | head = &fs_info->fs_devices->devices; |
4049 | max_errors = btrfs_super_num_devices(s: fs_info->super_copy) - 1; |
4050 | |
4051 | if (do_barriers) { |
4052 | ret = barrier_all_devices(info: fs_info); |
4053 | if (ret) { |
4054 | mutex_unlock( |
4055 | lock: &fs_info->fs_devices->device_list_mutex); |
4056 | btrfs_handle_fs_error(fs_info, ret, |
4057 | "errors while submitting device barriers." ); |
4058 | return ret; |
4059 | } |
4060 | } |
4061 | |
4062 | list_for_each_entry(dev, head, dev_list) { |
4063 | if (!dev->bdev) { |
4064 | total_errors++; |
4065 | continue; |
4066 | } |
4067 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
4068 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
4069 | continue; |
4070 | |
4071 | btrfs_set_stack_device_generation(s: dev_item, val: 0); |
4072 | btrfs_set_stack_device_type(s: dev_item, val: dev->type); |
4073 | btrfs_set_stack_device_id(s: dev_item, val: dev->devid); |
4074 | btrfs_set_stack_device_total_bytes(s: dev_item, |
4075 | val: dev->commit_total_bytes); |
4076 | btrfs_set_stack_device_bytes_used(s: dev_item, |
4077 | val: dev->commit_bytes_used); |
4078 | btrfs_set_stack_device_io_align(s: dev_item, val: dev->io_align); |
4079 | btrfs_set_stack_device_io_width(s: dev_item, val: dev->io_width); |
4080 | btrfs_set_stack_device_sector_size(s: dev_item, val: dev->sector_size); |
4081 | memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); |
4082 | memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid, |
4083 | BTRFS_FSID_SIZE); |
4084 | |
4085 | flags = btrfs_super_flags(s: sb); |
4086 | btrfs_set_super_flags(s: sb, val: flags | BTRFS_HEADER_FLAG_WRITTEN); |
4087 | |
4088 | ret = btrfs_validate_write_super(fs_info, sb); |
4089 | if (ret < 0) { |
4090 | mutex_unlock(lock: &fs_info->fs_devices->device_list_mutex); |
4091 | btrfs_handle_fs_error(fs_info, -EUCLEAN, |
4092 | "unexpected superblock corruption detected" ); |
4093 | return -EUCLEAN; |
4094 | } |
4095 | |
4096 | ret = write_dev_supers(device: dev, sb, max_mirrors); |
4097 | if (ret) |
4098 | total_errors++; |
4099 | } |
4100 | if (total_errors > max_errors) { |
4101 | btrfs_err(fs_info, "%d errors while writing supers" , |
4102 | total_errors); |
4103 | mutex_unlock(lock: &fs_info->fs_devices->device_list_mutex); |
4104 | |
4105 | /* FUA is masked off if unsupported and can't be the reason */ |
4106 | btrfs_handle_fs_error(fs_info, -EIO, |
4107 | "%d errors while writing supers" , |
4108 | total_errors); |
4109 | return -EIO; |
4110 | } |
4111 | |
4112 | total_errors = 0; |
4113 | list_for_each_entry(dev, head, dev_list) { |
4114 | if (!dev->bdev) |
4115 | continue; |
4116 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
4117 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
4118 | continue; |
4119 | |
4120 | ret = wait_dev_supers(device: dev, max_mirrors); |
4121 | if (ret) |
4122 | total_errors++; |
4123 | } |
4124 | mutex_unlock(lock: &fs_info->fs_devices->device_list_mutex); |
4125 | if (total_errors > max_errors) { |
4126 | btrfs_handle_fs_error(fs_info, -EIO, |
4127 | "%d errors while writing supers" , |
4128 | total_errors); |
4129 | return -EIO; |
4130 | } |
4131 | return 0; |
4132 | } |
4133 | |
4134 | /* Drop a fs root from the radix tree and free it. */ |
4135 | void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, |
4136 | struct btrfs_root *root) |
4137 | { |
4138 | bool drop_ref = false; |
4139 | |
4140 | spin_lock(lock: &fs_info->fs_roots_radix_lock); |
4141 | radix_tree_delete(&fs_info->fs_roots_radix, |
4142 | (unsigned long)root->root_key.objectid); |
4143 | if (test_and_clear_bit(nr: BTRFS_ROOT_IN_RADIX, addr: &root->state)) |
4144 | drop_ref = true; |
4145 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
4146 | |
4147 | if (BTRFS_FS_ERROR(fs_info)) { |
4148 | ASSERT(root->log_root == NULL); |
4149 | if (root->reloc_root) { |
4150 | btrfs_put_root(root: root->reloc_root); |
4151 | root->reloc_root = NULL; |
4152 | } |
4153 | } |
4154 | |
4155 | if (drop_ref) |
4156 | btrfs_put_root(root); |
4157 | } |
4158 | |
4159 | int btrfs_commit_super(struct btrfs_fs_info *fs_info) |
4160 | { |
4161 | struct btrfs_root *root = fs_info->tree_root; |
4162 | struct btrfs_trans_handle *trans; |
4163 | |
4164 | mutex_lock(&fs_info->cleaner_mutex); |
4165 | btrfs_run_delayed_iputs(fs_info); |
4166 | mutex_unlock(lock: &fs_info->cleaner_mutex); |
4167 | wake_up_process(tsk: fs_info->cleaner_kthread); |
4168 | |
4169 | /* Wait until ongoing cleanup work is done. */ |
4170 | down_write(sem: &fs_info->cleanup_work_sem); |
4171 | up_write(sem: &fs_info->cleanup_work_sem); |
4172 | |
4173 | trans = btrfs_join_transaction(root); |
4174 | if (IS_ERR(ptr: trans)) |
4175 | return PTR_ERR(ptr: trans); |
4176 | return btrfs_commit_transaction(trans); |
4177 | } |
4178 | |
4179 | static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info) |
4180 | { |
4181 | struct btrfs_transaction *trans; |
4182 | struct btrfs_transaction *tmp; |
4183 | bool found = false; |
4184 | |
4185 | if (list_empty(head: &fs_info->trans_list)) |
4186 | return; |
4187 | |
4188 | /* |
4189 | * This function is only called at the very end of close_ctree(), |
4190 | * thus there is no other running transaction and no need to take trans_lock. |
4191 | */ |
4192 | ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags)); |
4193 | list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) { |
4194 | struct extent_state *cached = NULL; |
4195 | u64 dirty_bytes = 0; |
4196 | u64 cur = 0; |
4197 | u64 found_start; |
4198 | u64 found_end; |
4199 | |
4200 | found = true; |
4201 | while (find_first_extent_bit(tree: &trans->dirty_pages, start: cur, |
4202 | start_ret: &found_start, end_ret: &found_end, bits: EXTENT_DIRTY, cached_state: &cached)) { |
4203 | dirty_bytes += found_end + 1 - found_start; |
4204 | cur = found_end + 1; |
4205 | } |
4206 | btrfs_warn(fs_info, |
4207 | "transaction %llu (with %llu dirty metadata bytes) is not committed" , |
4208 | trans->transid, dirty_bytes); |
4209 | btrfs_cleanup_one_transaction(trans, fs_info); |
4210 | |
4211 | if (trans == fs_info->running_transaction) |
4212 | fs_info->running_transaction = NULL; |
4213 | list_del_init(entry: &trans->list); |
4214 | |
4215 | btrfs_put_transaction(transaction: trans); |
4216 | trace_btrfs_transaction_commit(fs_info); |
4217 | } |
4218 | ASSERT(!found); |
4219 | } |
4220 | |
4221 | void __cold close_ctree(struct btrfs_fs_info *fs_info) |
4222 | { |
4223 | int ret; |
4224 | |
4225 | set_bit(nr: BTRFS_FS_CLOSING_START, addr: &fs_info->flags); |
4226 | |
4227 | /* |
4228 | * If we had UNFINISHED_DROPS we could still be processing them, so |
4229 | * clear that bit and wake up relocation so it can stop. |
4230 | * We must do this before stopping the block group reclaim task, because |
4231 | * at btrfs_relocate_block_group() we wait for this bit, and after the |
4232 | * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we |
4233 | * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will |
4234 | * return 1. |
4235 | */ |
4236 | btrfs_wake_unfinished_drop(fs_info); |
4237 | |
4238 | /* |
4239 | * We may have the reclaim task running and relocating a data block group, |
4240 | * in which case it may create delayed iputs. So stop it before we park |
4241 | * the cleaner kthread otherwise we can get new delayed iputs after |
4242 | * parking the cleaner, and that can make the async reclaim task to hang |
4243 | * if it's waiting for delayed iputs to complete, since the cleaner is |
4244 | * parked and can not run delayed iputs - this will make us hang when |
4245 | * trying to stop the async reclaim task. |
4246 | */ |
4247 | cancel_work_sync(work: &fs_info->reclaim_bgs_work); |
4248 | /* |
4249 | * We don't want the cleaner to start new transactions, add more delayed |
4250 | * iputs, etc. while we're closing. We can't use kthread_stop() yet |
4251 | * because that frees the task_struct, and the transaction kthread might |
4252 | * still try to wake up the cleaner. |
4253 | */ |
4254 | kthread_park(k: fs_info->cleaner_kthread); |
4255 | |
4256 | /* wait for the qgroup rescan worker to stop */ |
4257 | btrfs_qgroup_wait_for_completion(fs_info, interruptible: false); |
4258 | |
4259 | /* wait for the uuid_scan task to finish */ |
4260 | down(sem: &fs_info->uuid_tree_rescan_sem); |
4261 | /* Avoid complaints from lockdep et al., set sem back to initial state. */ |
4262 | up(sem: &fs_info->uuid_tree_rescan_sem); |
4263 | |
4264 | /* pause restriper - we want to resume on mount */ |
4265 | btrfs_pause_balance(fs_info); |
4266 | |
4267 | btrfs_dev_replace_suspend_for_unmount(fs_info); |
4268 | |
4269 | btrfs_scrub_cancel(info: fs_info); |
4270 | |
4271 | /* wait for any defraggers to finish */ |
4272 | wait_event(fs_info->transaction_wait, |
4273 | (atomic_read(&fs_info->defrag_running) == 0)); |
4274 | |
4275 | /* clear out the rbtree of defraggable inodes */ |
4276 | btrfs_cleanup_defrag_inodes(fs_info); |
4277 | |
4278 | /* |
4279 | * After we parked the cleaner kthread, ordered extents may have |
4280 | * completed and created new delayed iputs. If one of the async reclaim |
4281 | * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we |
4282 | * can hang forever trying to stop it, because if a delayed iput is |
4283 | * added after it ran btrfs_run_delayed_iputs() and before it called |
4284 | * btrfs_wait_on_delayed_iputs(), it will hang forever since there is |
4285 | * no one else to run iputs. |
4286 | * |
4287 | * So wait for all ongoing ordered extents to complete and then run |
4288 | * delayed iputs. This works because once we reach this point no one |
4289 | * can either create new ordered extents nor create delayed iputs |
4290 | * through some other means. |
4291 | * |
4292 | * Also note that btrfs_wait_ordered_roots() is not safe here, because |
4293 | * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent, |
4294 | * but the delayed iput for the respective inode is made only when doing |
4295 | * the final btrfs_put_ordered_extent() (which must happen at |
4296 | * btrfs_finish_ordered_io() when we are unmounting). |
4297 | */ |
4298 | btrfs_flush_workqueue(wq: fs_info->endio_write_workers); |
4299 | /* Ordered extents for free space inodes. */ |
4300 | btrfs_flush_workqueue(wq: fs_info->endio_freespace_worker); |
4301 | btrfs_run_delayed_iputs(fs_info); |
4302 | |
4303 | cancel_work_sync(work: &fs_info->async_reclaim_work); |
4304 | cancel_work_sync(work: &fs_info->async_data_reclaim_work); |
4305 | cancel_work_sync(work: &fs_info->preempt_reclaim_work); |
4306 | |
4307 | /* Cancel or finish ongoing discard work */ |
4308 | btrfs_discard_cleanup(fs_info); |
4309 | |
4310 | if (!sb_rdonly(sb: fs_info->sb)) { |
4311 | /* |
4312 | * The cleaner kthread is stopped, so do one final pass over |
4313 | * unused block groups. |
4314 | */ |
4315 | btrfs_delete_unused_bgs(fs_info); |
4316 | |
4317 | /* |
4318 | * There might be existing delayed inode workers still running |
4319 | * and holding an empty delayed inode item. We must wait for |
4320 | * them to complete first because they can create a transaction. |
4321 | * This happens when someone calls btrfs_balance_delayed_items() |
4322 | * and then a transaction commit runs the same delayed nodes |
4323 | * before any delayed worker has done something with the nodes. |
4324 | * We must wait for any worker here and not at transaction |
4325 | * commit time since that could cause a deadlock. |
4326 | * This is a very rare case. |
4327 | */ |
4328 | btrfs_flush_workqueue(wq: fs_info->delayed_workers); |
4329 | |
4330 | ret = btrfs_commit_super(fs_info); |
4331 | if (ret) |
4332 | btrfs_err(fs_info, "commit super ret %d" , ret); |
4333 | } |
4334 | |
4335 | if (BTRFS_FS_ERROR(fs_info)) |
4336 | btrfs_error_commit_super(fs_info); |
4337 | |
4338 | kthread_stop(k: fs_info->transaction_kthread); |
4339 | kthread_stop(k: fs_info->cleaner_kthread); |
4340 | |
4341 | ASSERT(list_empty(&fs_info->delayed_iputs)); |
4342 | set_bit(nr: BTRFS_FS_CLOSING_DONE, addr: &fs_info->flags); |
4343 | |
4344 | if (btrfs_check_quota_leak(fs_info)) { |
4345 | WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); |
4346 | btrfs_err(fs_info, "qgroup reserved space leaked" ); |
4347 | } |
4348 | |
4349 | btrfs_free_qgroup_config(fs_info); |
4350 | ASSERT(list_empty(&fs_info->delalloc_roots)); |
4351 | |
4352 | if (percpu_counter_sum(fbc: &fs_info->delalloc_bytes)) { |
4353 | btrfs_info(fs_info, "at unmount delalloc count %lld" , |
4354 | percpu_counter_sum(&fs_info->delalloc_bytes)); |
4355 | } |
4356 | |
4357 | if (percpu_counter_sum(fbc: &fs_info->ordered_bytes)) |
4358 | btrfs_info(fs_info, "at unmount dio bytes count %lld" , |
4359 | percpu_counter_sum(&fs_info->ordered_bytes)); |
4360 | |
4361 | btrfs_sysfs_remove_mounted(fs_info); |
4362 | btrfs_sysfs_remove_fsid(fs_devs: fs_info->fs_devices); |
4363 | |
4364 | btrfs_put_block_group_cache(info: fs_info); |
4365 | |
4366 | /* |
4367 | * We must make sure no read requests are submitted after we |
4368 | * have stopped all the workers. |
4369 | */ |
4370 | invalidate_inode_pages2(mapping: fs_info->btree_inode->i_mapping); |
4371 | btrfs_stop_all_workers(fs_info); |
4372 | |
4373 | /* We shouldn't have any transaction open at this point */ |
4374 | warn_about_uncommitted_trans(fs_info); |
4375 | |
4376 | clear_bit(nr: BTRFS_FS_OPEN, addr: &fs_info->flags); |
4377 | free_root_pointers(info: fs_info, free_chunk_root: true); |
4378 | btrfs_free_fs_roots(fs_info); |
4379 | |
4380 | /* |
4381 | * We must free the block groups after dropping the fs_roots as we could |
4382 | * have had an IO error and have left over tree log blocks that aren't |
4383 | * cleaned up until the fs roots are freed. This makes the block group |
4384 | * accounting appear to be wrong because there's pending reserved bytes, |
4385 | * so make sure we do the block group cleanup afterwards. |
4386 | */ |
4387 | btrfs_free_block_groups(info: fs_info); |
4388 | |
4389 | iput(fs_info->btree_inode); |
4390 | |
4391 | btrfs_mapping_tree_free(fs_info); |
4392 | btrfs_close_devices(fs_devices: fs_info->fs_devices); |
4393 | } |
4394 | |
4395 | void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans, |
4396 | struct extent_buffer *buf) |
4397 | { |
4398 | struct btrfs_fs_info *fs_info = buf->fs_info; |
4399 | u64 transid = btrfs_header_generation(eb: buf); |
4400 | |
4401 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
4402 | /* |
4403 | * This is a fast path so only do this check if we have sanity tests |
4404 | * enabled. Normal people shouldn't be using unmapped buffers as dirty |
4405 | * outside of the sanity tests. |
4406 | */ |
4407 | if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) |
4408 | return; |
4409 | #endif |
4410 | /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */ |
4411 | ASSERT(trans->transid == fs_info->generation); |
4412 | btrfs_assert_tree_write_locked(eb: buf); |
4413 | if (unlikely(transid != fs_info->generation)) { |
4414 | btrfs_abort_transaction(trans, -EUCLEAN); |
4415 | btrfs_crit(fs_info, |
4416 | "dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu" , |
4417 | buf->start, transid, fs_info->generation); |
4418 | } |
4419 | set_extent_buffer_dirty(buf); |
4420 | } |
4421 | |
4422 | static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, |
4423 | int flush_delayed) |
4424 | { |
4425 | /* |
4426 | * It looks as though older kernels can get into trouble with |
4427 | * this code; they end up stuck in balance_dirty_pages() forever. |
4428 | */ |
4429 | int ret; |
4430 | |
4431 | if (current->flags & PF_MEMALLOC) |
4432 | return; |
4433 | |
4434 | if (flush_delayed) |
4435 | btrfs_balance_delayed_items(fs_info); |
4436 | |
4437 | ret = __percpu_counter_compare(fbc: &fs_info->dirty_metadata_bytes, |
4438 | BTRFS_DIRTY_METADATA_THRESH, |
4439 | batch: fs_info->dirty_metadata_batch); |
4440 | if (ret > 0) { |
4441 | balance_dirty_pages_ratelimited(mapping: fs_info->btree_inode->i_mapping); |
4442 | } |
4443 | } |
4444 | |
4445 | void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info) |
4446 | { |
4447 | __btrfs_btree_balance_dirty(fs_info, flush_delayed: 1); |
4448 | } |
4449 | |
4450 | void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info) |
4451 | { |
4452 | __btrfs_btree_balance_dirty(fs_info, flush_delayed: 0); |
4453 | } |
4454 | |
4455 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) |
4456 | { |
4457 | /* cleanup FS via transaction */ |
4458 | btrfs_cleanup_transaction(fs_info); |
4459 | |
4460 | mutex_lock(&fs_info->cleaner_mutex); |
4461 | btrfs_run_delayed_iputs(fs_info); |
4462 | mutex_unlock(lock: &fs_info->cleaner_mutex); |
4463 | |
4464 | down_write(sem: &fs_info->cleanup_work_sem); |
4465 | up_write(sem: &fs_info->cleanup_work_sem); |
4466 | } |
4467 | |
4468 | static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info) |
4469 | { |
4470 | struct btrfs_root *gang[8]; |
4471 | u64 root_objectid = 0; |
4472 | int ret; |
4473 | |
4474 | spin_lock(lock: &fs_info->fs_roots_radix_lock); |
4475 | while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
4476 | results: (void **)gang, first_index: root_objectid, |
4477 | ARRAY_SIZE(gang))) != 0) { |
4478 | int i; |
4479 | |
4480 | for (i = 0; i < ret; i++) |
4481 | gang[i] = btrfs_grab_root(root: gang[i]); |
4482 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
4483 | |
4484 | for (i = 0; i < ret; i++) { |
4485 | if (!gang[i]) |
4486 | continue; |
4487 | root_objectid = gang[i]->root_key.objectid; |
4488 | btrfs_free_log(NULL, root: gang[i]); |
4489 | btrfs_put_root(root: gang[i]); |
4490 | } |
4491 | root_objectid++; |
4492 | spin_lock(lock: &fs_info->fs_roots_radix_lock); |
4493 | } |
4494 | spin_unlock(lock: &fs_info->fs_roots_radix_lock); |
4495 | btrfs_free_log_root_tree(NULL, fs_info); |
4496 | } |
4497 | |
4498 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
4499 | { |
4500 | struct btrfs_ordered_extent *ordered; |
4501 | |
4502 | spin_lock(lock: &root->ordered_extent_lock); |
4503 | /* |
4504 | * This will just short circuit the ordered completion stuff which will |
4505 | * make sure the ordered extent gets properly cleaned up. |
4506 | */ |
4507 | list_for_each_entry(ordered, &root->ordered_extents, |
4508 | root_extent_list) |
4509 | set_bit(nr: BTRFS_ORDERED_IOERR, addr: &ordered->flags); |
4510 | spin_unlock(lock: &root->ordered_extent_lock); |
4511 | } |
4512 | |
4513 | static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) |
4514 | { |
4515 | struct btrfs_root *root; |
4516 | LIST_HEAD(splice); |
4517 | |
4518 | spin_lock(lock: &fs_info->ordered_root_lock); |
4519 | list_splice_init(list: &fs_info->ordered_roots, head: &splice); |
4520 | while (!list_empty(head: &splice)) { |
4521 | root = list_first_entry(&splice, struct btrfs_root, |
4522 | ordered_root); |
4523 | list_move_tail(list: &root->ordered_root, |
4524 | head: &fs_info->ordered_roots); |
4525 | |
4526 | spin_unlock(lock: &fs_info->ordered_root_lock); |
4527 | btrfs_destroy_ordered_extents(root); |
4528 | |
4529 | cond_resched(); |
4530 | spin_lock(lock: &fs_info->ordered_root_lock); |
4531 | } |
4532 | spin_unlock(lock: &fs_info->ordered_root_lock); |
4533 | |
4534 | /* |
4535 | * We need this here because if we've been flipped read-only we won't |
4536 | * get sync() from the umount, so we need to make sure any ordered |
4537 | * extents that haven't had their dirty pages IO start writeout yet |
4538 | * actually get run and error out properly. |
4539 | */ |
4540 | btrfs_wait_ordered_roots(fs_info, U64_MAX, range_start: 0, range_len: (u64)-1); |
4541 | } |
4542 | |
4543 | static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
4544 | struct btrfs_fs_info *fs_info) |
4545 | { |
4546 | struct rb_node *node; |
4547 | struct btrfs_delayed_ref_root *delayed_refs; |
4548 | struct btrfs_delayed_ref_node *ref; |
4549 | |
4550 | delayed_refs = &trans->delayed_refs; |
4551 | |
4552 | spin_lock(lock: &delayed_refs->lock); |
4553 | if (atomic_read(v: &delayed_refs->num_entries) == 0) { |
4554 | spin_unlock(lock: &delayed_refs->lock); |
4555 | btrfs_debug(fs_info, "delayed_refs has NO entry" ); |
4556 | return; |
4557 | } |
4558 | |
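/* Walk every delayed ref head, dropping its refs and releasing the space reservations they hold. */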
4559 | while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) { |
4560 | struct btrfs_delayed_ref_head *head; |
4561 | struct rb_node *n; |
4562 | bool pin_bytes = false; |
4563 | |
4564 | head = rb_entry(node, struct btrfs_delayed_ref_head, |
4565 | href_node); |
4566 | if (btrfs_delayed_ref_lock(delayed_refs, head)) |
4567 | continue; |
4568 | |
4569 | spin_lock(lock: &head->lock); |
4570 | while ((n = rb_first_cached(&head->ref_tree)) != NULL) { |
4571 | ref = rb_entry(n, struct btrfs_delayed_ref_node, |
4572 | ref_node); |
4573 | rb_erase_cached(node: &ref->ref_node, root: &head->ref_tree); |
4574 | RB_CLEAR_NODE(&ref->ref_node); |
4575 | if (!list_empty(head: &ref->add_list)) |
4576 | list_del(entry: &ref->add_list); |
4577 | atomic_dec(v: &delayed_refs->num_entries); |
4578 | btrfs_put_delayed_ref(ref); |
4579 | btrfs_delayed_refs_rsv_release(fs_info, nr_refs: 1, nr_csums: 0); |
4580 | } |
4581 | if (head->must_insert_reserved) |
4582 | pin_bytes = true; |
4583 | btrfs_free_delayed_extent_op(op: head->extent_op); |
4584 | btrfs_delete_ref_head(delayed_refs, head); |
4585 | spin_unlock(lock: &head->lock); |
4586 | spin_unlock(lock: &delayed_refs->lock); |
4587 | mutex_unlock(lock: &head->mutex); |
4588 | |
4589 | if (pin_bytes) { |
4590 | struct btrfs_block_group *cache; |
4591 | |
4592 | cache = btrfs_lookup_block_group(info: fs_info, bytenr: head->bytenr); |
4593 | BUG_ON(!cache); |
4594 | |
4595 | spin_lock(lock: &cache->space_info->lock); |
4596 | spin_lock(lock: &cache->lock); |
4597 | cache->pinned += head->num_bytes; |
4598 | btrfs_space_info_update_bytes_pinned(fs_info, |
4599 | sinfo: cache->space_info, bytes: head->num_bytes); |
4600 | cache->reserved -= head->num_bytes; |
4601 | cache->space_info->bytes_reserved -= head->num_bytes; |
4602 | spin_unlock(lock: &cache->lock); |
4603 | spin_unlock(lock: &cache->space_info->lock); |
4604 | |
4605 | btrfs_put_block_group(cache); |
4606 | |
4607 | btrfs_error_unpin_extent_range(fs_info, start: head->bytenr, |
4608 | end: head->bytenr + head->num_bytes - 1); |
4609 | } |
4610 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
4611 | btrfs_put_delayed_ref_head(head); |
4612 | cond_resched(); |
4613 | spin_lock(lock: &delayed_refs->lock); |
4614 | } |
4615 | btrfs_qgroup_destroy_extent_records(trans); |
4616 | |
4617 | spin_unlock(lock: &delayed_refs->lock); |
4618 | } |
4619 | |
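/*
 * Drop all inodes on the delalloc list of @root and invalidate their dirty
 * pages.  Used on the transaction cleanup path where the delalloc will
 * never be written back.
 */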
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	LIST_HEAD(splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		struct inode *inode = NULL;

		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);
		btrfs_del_delalloc_inode(btrfs_inode);
		spin_unlock(&root->delalloc_lock);

		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
		inode = igrab(&btrfs_inode->vfs_inode);
		if (inode) {
			unsigned int nofs_flag;

			nofs_flag = memalloc_nofs_save();
			invalidate_inode_pages2(inode->i_mapping);
			memalloc_nofs_restore(nofs_flag);
			iput(inode);
		}
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);
}

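/* Walk every root that has pending delalloc and drop its delalloc inodes. */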
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}

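/*
 * Clear @mark from @dirty_pages and release the corresponding extent
 * buffers without writing them: each buffer is locked, its writeback is
 * waited for and its dirty bit is cleared before it is freed as stale.
 */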
static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					 struct extent_io_tree *dirty_pages,
					 int mark)
{
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(dirty_pages, start, &start, &end,
				     mark, NULL)) {
		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;

			btrfs_tree_lock(eb);
			wait_on_extent_buffer_writeback(eb);
			btrfs_clear_buffer_dirty(NULL, eb);
			btrfs_tree_unlock(eb);

			free_extent_buffer_stale(eb);
		}
	}
}

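/*
 * Unpin every extent range still recorded in @unpin after an aborted
 * transaction.  See the comment below on why unused_bg_unpin_mutex is held
 * around each iteration.
 */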
static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;

	while (1) {
		struct extent_state *cached_state = NULL;

		/*
		 * btrfs_finish_extent_commit() may pick up the same range as
		 * us between find_first_extent_bit() and clear_extent_dirty().
		 * Hence, hold the unused_bg_unpin_mutex to avoid unpinning
		 * the same extent range twice.
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		if (!find_first_extent_bit(unpin, 0, &start, &end,
					   EXTENT_DIRTY, &cached_state)) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		clear_extent_dirty(unpin, start, end, &cached_state);
		free_extent_state(cached_state);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}
}

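/*
 * Drop the free space cache inode attached to a block group's io_ctl and
 * drop the block group reference held for the in-flight cache write.
 */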
static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		unsigned int nofs_flag;

		nofs_flag = memalloc_nofs_save();
		invalidate_inode_pages2(inode->i_mapping);
		memalloc_nofs_restore(nofs_flag);

		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	ASSERT(cache->io_ctl.pages == NULL);
	btrfs_put_block_group(cache);
}

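/*
 * Clean up block groups left on the transaction's dirty_bgs and io_bgs
 * lists after an aborted commit: mark their space caches as errored and
 * drop the references held by those lists.
 */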
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of the io_bgs member for details on why
	 * it's safe to use it without any locking.
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}

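/*
 * Free the per-transaction qgroup metadata reservations of every root that
 * is still tagged as being part of the transaction, and clear the tag.
 */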
static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *gang[8];
	int i;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];

			btrfs_qgroup_free_meta_all_pertrans(root);
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					     (unsigned long)root->root_key.objectid,
					     BTRFS_ROOT_TRANS_TAG);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

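/*
 * Tear down a single (aborted) transaction: clean up its dirty block
 * groups, delayed refs, delayed inodes, dirty metadata and pinned extents,
 * waking up any waiters as the transaction state is advanced.
 */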
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev, *tmp;

	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&dev->post_commit_list);
	}

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);

	btrfs_free_all_qgroup_pertrans(fs_info);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}

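/*
 * Clean up every transaction still on fs_info->trans_list.  Transactions
 * that already entered the commit phase are waited for, everything else is
 * torn down with btrfs_cleanup_one_transaction().
 */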
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_PREP) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	btrfs_drop_all_logs(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

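/*
 * Initialize root->free_objectid to one past the highest objectid found in
 * the tree, clamped to at least BTRFS_FIRST_FREE_OBJECTID, so that newly
 * created items get objectids above everything that already exists.
 */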
int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist a root
		 * with such id, but this is out of valid range.
		 */
		ret = -EUCLEAN;
		goto error;
	}
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		root->free_objectid = max_t(u64, found_key.objectid + 1,
					    BTRFS_FIRST_FREE_OBJECTID);
	} else {
		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

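/*
 * Hand out the next free objectid of @root, or -ENOSPC once the counter
 * reaches BTRFS_LAST_FREE_OBJECTID.  Serialized by root->objectid_mutex.
 *
 * Typical caller pattern (an illustrative sketch, not a specific caller):
 *
 *	u64 objectid;
 *	int ret;
 *
 *	ret = btrfs_get_free_objectid(root, &objectid);
 *	if (ret)
 *		return ret;
 *	... use objectid as the objectid of the new inode/item key ...
 */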
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   root->root_key.objectid);
		ret = -ENOSPC;
		goto out;
	}

	*objectid = root->free_objectid++;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}