/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"

/*
 * Passive reference counting access wrappers to the perag structures. If the
 * per-ag structure is to be freed, the freeing code is responsible for
 * cleaning up objects with passive references before freeing the structure,
 * such as cached buffers.
 */
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_get(pag, _RET_IP_);
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		atomic_inc(&pag->pag_ref);
	}
	rcu_read_unlock();
	return pag;
}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	unsigned int		tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_get_tag(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	rcu_read_unlock();
	return pag;
}

/* Get a passive reference to the given perag. */
struct xfs_perag *
xfs_perag_hold(
	struct xfs_perag	*pag)
{
	ASSERT(atomic_read(&pag->pag_ref) > 0 ||
	       atomic_read(&pag->pag_active_ref) > 0);

	trace_xfs_perag_hold(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	return pag;
}

void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_put(pag, _RET_IP_);
	ASSERT(atomic_read(&pag->pag_ref) > 0);
	atomic_dec(&pag->pag_ref);
}
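
/*
 * Illustrative sketch (not part of this file): the usual passive reference
 * pattern pairs xfs_perag_get() with xfs_perag_put(). The names @mp and
 * @agno below are assumed to be a mounted filesystem and a valid AG number:
 *
 *	struct xfs_perag	*pag;
 *
 *	pag = xfs_perag_get(mp, agno);
 *	if (pag) {
 *		...	inspect cached per-AG state, e.g. pag->pagf_freeblks
 *		xfs_perag_put(pag);
 *	}
 */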

/*
 * Active references for perag structures. This is for short term access to the
 * per ag structures for walking trees or accessing state. If an AG is being
 * shrunk or is offline, then this will fail to find that AG and return NULL
 * instead.
 */
struct xfs_perag *
xfs_perag_grab(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_grab(pag, _RET_IP_);
		if (!atomic_inc_not_zero(&pag->pag_active_ref))
			pag = NULL;
	}
	rcu_read_unlock();
	return pag;
}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_grab_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_grab_tag(pag, _RET_IP_);
	if (!atomic_inc_not_zero(&pag->pag_active_ref))
		pag = NULL;
	rcu_read_unlock();
	return pag;
}

void
xfs_perag_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_rele(pag, _RET_IP_);
	if (atomic_dec_and_test(&pag->pag_active_ref))
		wake_up(&pag->pag_active_wq);
}
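
/*
 * Illustrative sketch (not part of this file): active references gate AG
 * access against offlining and shrinking, so a walk over all AGs typically
 * pairs xfs_perag_grab() with xfs_perag_rele() and skips AGs it cannot grab:
 *
 *	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *		pag = xfs_perag_grab(mp, agno);
 *		if (!pag)
 *			continue;	AG is offline or being shrunk
 *		...	walk btrees, access per-AG state
 *		xfs_perag_rele(pag);
 *	}
 */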

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
		 */
		pag = xfs_perag_get(mp, index);
		error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}

		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
		xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
		error = -EFSCORRUPTED;
		goto out;
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = fdblocks;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	return error;
}
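
/*
 * Illustrative sketch (not part of this file): with the per-AG counters read
 * in above, the free data block count is the sum of three pools. For
 * example, two hypothetical AGs with:
 *
 *	pagf_freeblks  = 1000, 800	free extents in the bnobt/cntbt
 *	pagf_flcount   =    4,   4	blocks on the AGFLs
 *	pagf_btreeblks =   10,   6	blocks in the free space btrees
 *
 * yield fdblocks = (1000 + 800) + (4 + 4) + (10 + 6) = 1824.
 */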

STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
	kfree(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
void
xfs_free_perag(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
		xfs_defer_drain_free(&pag->pag_intents_drain);

		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_buf_cache_destroy(&pag->pag_bcache);

		/* drop the mount's active reference */
		xfs_perag_rele(pag);
		XFS_IS_CORRUPT(pag->pag_mount,
				atomic_read(&pag->pag_active_ref) != 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/* Find the size of the AG, in blocks. */
static xfs_agblock_t
__xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks)
{
	ASSERT(agno < agcount);

	if (agno < agcount - 1)
		return mp->m_sb.sb_agblocks;
	return dblocks - (agno * mp->m_sb.sb_agblocks);
}

xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
			mp->m_sb.sb_dblocks);
}
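
/*
 * Illustrative sketch (not part of this file): only the last AG can be short.
 * For a hypothetical filesystem with sb_dblocks = 1000, sb_agblocks = 256 and
 * sb_agcount = 4, AGs 0-2 are each 256 blocks long, while AG 3 is
 *
 *	1000 - (3 * 256) = 232 blocks.
 */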

/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agblock_t		eoag,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	xfs_agblock_t		bno;

	/*
	 * Calculate the first inode, which will be in the first
	 * cluster-aligned block after the AGFL.
	 */
	bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
	*first = XFS_AGB_TO_AGINO(mp, bno);

	/*
	 * Calculate the last inode, which will be at the end of the
	 * last (aligned) cluster that can be allocated in the AG.
	 */
	bno = round_down(eoag, M_IGEO(mp)->cluster_align);
	*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}

void
xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}
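
/*
 * Illustrative sketch (not part of this file): XFS_AGB_TO_AGINO() shifts an
 * AG block number left by sb_inopblog. Assuming a hypothetical geometry with
 * 16 inodes per block (sb_inopblog = 4), cluster_align = 1, an AG of 1000
 * blocks, and XFS_AGFL_BLOCK() = 0, the range works out to:
 *
 *	first = round_up(0 + 1, 1) << 4        = 16
 *	last  = (round_down(1000, 1) << 4) - 1 = 15999
 */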

/*
 * Free perags within the specified AG range; this is only used to free unused
 * perags on the error handling path.
 */
void
xfs_free_unused_perag_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agstart,
	xfs_agnumber_t		agend)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;

	for (index = agstart; index < agend; index++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		spin_unlock(&mp->m_perag_lock);
		if (!pag)
			break;
		xfs_buf_cache_destroy(&pag->pag_bcache);
		xfs_defer_drain_free(&pag->pag_intents_drain);
		kfree(pag);
	}
}

int
xfs_initialize_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks,
	xfs_agnumber_t		*maxagi)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;
	xfs_agnumber_t		first_initialised = NULLAGNUMBER;
	int			error;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kzalloc(sizeof(*pag), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;

		error = radix_tree_preload(GFP_KERNEL | __GFP_RETRY_MAYFAIL);
		if (error)
			goto out_free_pag;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_free_pag;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();

#ifdef __KERNEL__
		/* Place kernel structure only init below this point. */
		spin_lock_init(&pag->pag_ici_lock);
		spin_lock_init(&pag->pagb_lock);
		spin_lock_init(&pag->pag_state_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		xfs_defer_drain_init(&pag->pag_intents_drain);
		init_waitqueue_head(&pag->pagb_wait);
		init_waitqueue_head(&pag->pag_active_wq);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		xfs_hooks_init(&pag->pag_rmap_update_hooks);
#endif /* __KERNEL__ */

		error = xfs_buf_cache_init(&pag->pag_bcache);
		if (error)
			goto out_remove_pag;

		/* Active ref owned by mount indicates AG is online. */
		atomic_set(&pag->pag_active_ref, 1);

		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;

		/*
		 * Pre-calculated geometry
		 */
		pag->block_count = __xfs_ag_block_count(mp, index, agcount,
				dblocks);
		pag->min_block = XFS_AGFL_BLOCK(mp);
		__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_remove_pag:
	xfs_defer_drain_free(&pag->pag_intents_drain);
	spin_lock(&mp->m_perag_lock);
	radix_tree_delete(&mp->m_perag_tree, index);
	spin_unlock(&mp->m_perag_lock);
out_free_pag:
	kfree(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	xfs_free_unused_perag_range(mp, first_initialised, agcount);
	return error;
}
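
/*
 * Illustrative sketch (not part of this file): a mount-time caller would size
 * the per-AG tree from the superblock geometry, roughly:
 *
 *	error = xfs_initialize_perag(mp, mp->m_sb.sb_agcount,
 *			mp->m_sb.sb_dblocks, &mp->m_maxagi);
 *	if (error)
 *		return error;
 *
 * Growfs can call this again with a larger agcount; AGs already in the tree
 * are skipped, and a failure unwinds only the newly added perags.
 */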

static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
}

/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (xfs_ag_contains_log(mp, id->agno)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log and
			 * bump the record count.
			 */
			arec->ar_blockcount = cpu_to_be32(start -
						mp->m_ag_prealloc_blocks);
			be16_add_cpu(&block->bb_numrecs, 1);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the block count of this record; if it is nonzero,
	 * increment the record count.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (arec->ar_blockcount)
		be16_add_cpu(&block->bb_numrecs, 1);
}
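
/*
 * Illustrative sketch (not part of this file): for a hypothetical AG of 1000
 * blocks with m_ag_prealloc_blocks = 16 that contains an internal log of 128
 * blocks starting at AG block 64, the code above produces two records:
 *
 *	[  16 ..  63 ]	free space padding up to the stripe-aligned log
 *	[ 192 .. 999 ]	free space after the log (64 + 128 = 192)
 *
 * Without an internal log in this AG there is a single record [ 16 .. 999 ].
 */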

/*
 * bnobt/cntbt btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 4, id->agno);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_has_reflink(mp)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (xfs_ag_contains_log(mp, id->agno)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}
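
/*
 * Illustrative summary (not part of this file) of the initial rmap records
 * built above, in record order; bracketed entries are conditional:
 *
 *	rec  owner                startblock         blockcount
 *	 1   XFS_RMAP_OWN_FS      0                  up to the bnobt root
 *	 2   XFS_RMAP_OWN_AG      XFS_BNO_BLOCK()    2 (bnobt + cntbt roots)
 *	 3   XFS_RMAP_OWN_INOBT   XFS_IBT_BLOCK()    inobt (+ finobt) roots
 *	 4   XFS_RMAP_OWN_AG      XFS_RMAP_BLOCK()   1 (rmapbt root)
 *	[5]  XFS_RMAP_OWN_REFC    xfs_refc_block()   1, if reflink
 *	[6]  XFS_RMAP_OWN_LOG     log start          sb_logblocks, if this AG
 *						     holds the internal log
 */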

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_bno_root = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_cnt_root = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_bno_level = cpu_to_be32(1);
	agf->agf_cnt_level = cpu_to_be32(1);
	if (xfs_has_rmapbt(mp)) {
		agf->agf_rmap_root = cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_rmap_level = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (xfs_ag_contains_log(mp, id->agno)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_has_crc(mp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_finobt(mp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_has_inobtcounts(mp)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_has_finobt(mp))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);
static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	const struct xfs_btree_ops *bc_ops;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.bc_ops = &xfs_bnobt_ops,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_bnoroot_init,
		.bc_ops = &xfs_cntbt_ops,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.bc_ops = &xfs_inobt_ops,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.bc_ops = &xfs_finobt_ops,
		.need_init = xfs_has_finobt(mp)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.bc_ops = &xfs_rmapbt_ops,
		.need_init = xfs_has_rmapbt(mp)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.bc_ops = &xfs_refcountbt_ops,
		.need_init = xfs_has_reflink(mp)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->bc_ops = dp->bc_ops;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
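
/*
 * Illustrative sketch (not part of this file): a growfs-style caller fills in
 * the geometry for each new AG, lets this function queue the header buffers,
 * then submits the delayed write list itself, roughly:
 *
 *	struct aghdr_init_data	id = {};
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	id.agno = agno;
 *	id.agsize = agsize;
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (!error)
 *		error = xfs_buf_delwri_submit(&id.buffer_list);
 */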

int
xfs_ag_shrink_space(
	struct xfs_perag	*pag,
	struct xfs_trans	**tpp,
	xfs_extlen_t		delta)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_alloc_arg	args = {
		.tp	= *tpp,
		.mp	= mp,
		.pag	= pag,
		.minlen	= delta,
		.maxlen	= delta,
		.oinfo	= XFS_RMAP_OINFO_SKIP_UPDATE,
		.resv	= XFS_AG_RESV_NONE,
		.prod	= 1
	};
	struct xfs_buf		*agibp, *agfbp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	xfs_agblock_t		aglen;
	int			error, err2;

	ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
	if (error)
		return error;

	agi = agibp->b_addr;

	error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);
	/* some extra paranoid checks before we shrink the ag */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length)) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
		return -EFSCORRUPTED;
	}
	if (delta >= aglen)
		return -EINVAL;

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
	if (error)
		return error;

	/*
	 * Disable perag reservations so they don't cause the allocation
	 * request to fail. We'll reestablish the reservation before we return.
	 */
	error = xfs_ag_resv_free(pag);
	if (error)
		return error;

	/* internal log shouldn't also show up in the free space btrees */
	error = xfs_alloc_vextent_exact_bno(&args,
			XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta));
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;

	if (error) {
		/*
		 * If extent allocation fails, need to roll the transaction to
		 * ensure that the AGFL fixup has been committed anyway.
		 *
		 * We need to hold the AGF across the roll to ensure nothing can
		 * access the AG for allocation until the shrink is fully
		 * cleaned up. And due to the resetting of the AG block
		 * reservation space needing to lock the AGI, we also have to
		 * hold that so we don't get AGI/AGF lock order inversions in
		 * the error handling path.
		 */
		xfs_trans_bhold(*tpp, agfbp);
		xfs_trans_bhold(*tpp, agibp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
		xfs_trans_bjoin(*tpp, agibp);
		goto resv_init_out;
	}

	/*
	 * If the extent was successfully removed from the free space btrees,
	 * confirm that the per-AG space reservation still works as expected.
	 */
	be32_add_cpu(&agi->agi_length, -delta);
	be32_add_cpu(&agf->agf_length, -delta);

	err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		err2 = xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
				XFS_AG_RESV_NONE, true);
		if (err2)
			goto resv_err;

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

		error = -ENOSPC;
		goto resv_init_out;
	}

	/* Update perag geometry */
	pag->block_count -= delta;
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);

	xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
	xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
	return 0;

resv_init_out:
	err2 = xfs_ag_resv_init(pag, *tpp);
	if (!err2)
		return error;
resv_err:
	xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return err2;
}

/*
 * Extend the AG indicated by @pag by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);

	error = xfs_ialloc_read_agi(pag, tp, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(pag, tp, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len,
			len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE);
	if (error)
		return error;

	/* Update perag geometry */
	pag->block_count = be32_to_cpu(agf->agf_length);
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	return 0;
}

/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_perag	*pag,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	unsigned int		freeblks;
	int			error;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
	if (error)
		goto out_agi;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = pag->pag_agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}
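
/*
 * Illustrative sketch (not part of this file): an ioctl-style caller looks up
 * the perag, fills the geometry structure and drops the reference, roughly:
 *
 *	struct xfs_ag_geometry	ageo;
 *	struct xfs_perag	*pag;
 *
 *	pag = xfs_perag_get(mp, agno);
 *	if (!pag)
 *		return -EINVAL;
 *	error = xfs_ag_get_geometry(pag, &ageo);
 *	xfs_perag_put(pag);
 */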