1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2014 Red Hat, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_mount.h"
14#include "xfs_sb.h"
15#include "xfs_defer.h"
16#include "xfs_btree.h"
17#include "xfs_trans.h"
18#include "xfs_alloc.h"
19#include "xfs_rmap.h"
20#include "xfs_rmap_btree.h"
21#include "xfs_trace.h"
22#include "xfs_errortag.h"
23#include "xfs_error.h"
24#include "xfs_inode.h"
25#include "xfs_ag.h"
26#include "xfs_health.h"
27
28struct kmem_cache *xfs_rmap_intent_cache;
29
30/*
31 * Lookup the first record less than or equal to [bno, len, owner, offset]
32 * in the btree given by cur.
33 */
34int
35xfs_rmap_lookup_le(
36 struct xfs_btree_cur *cur,
37 xfs_agblock_t bno,
38 uint64_t owner,
39 uint64_t offset,
40 unsigned int flags,
41 struct xfs_rmap_irec *irec,
42 int *stat)
43{
44 int get_stat = 0;
45 int error;
46
47 cur->bc_rec.r.rm_startblock = bno;
48 cur->bc_rec.r.rm_blockcount = 0;
49 cur->bc_rec.r.rm_owner = owner;
50 cur->bc_rec.r.rm_offset = offset;
51 cur->bc_rec.r.rm_flags = flags;
52
53 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
54 if (error || !(*stat) || !irec)
55 return error;
56
57 error = xfs_rmap_get_rec(cur, irec, stat: &get_stat);
58 if (error)
59 return error;
60 if (!get_stat) {
61 xfs_btree_mark_sick(cur);
62 return -EFSCORRUPTED;
63 }
64
65 return 0;
66}
67
68/*
69 * Lookup the record exactly matching [bno, len, owner, offset]
70 * in the btree given by cur.
71 */
72int
73xfs_rmap_lookup_eq(
74 struct xfs_btree_cur *cur,
75 xfs_agblock_t bno,
76 xfs_extlen_t len,
77 uint64_t owner,
78 uint64_t offset,
79 unsigned int flags,
80 int *stat)
81{
82 cur->bc_rec.r.rm_startblock = bno;
83 cur->bc_rec.r.rm_blockcount = len;
84 cur->bc_rec.r.rm_owner = owner;
85 cur->bc_rec.r.rm_offset = offset;
86 cur->bc_rec.r.rm_flags = flags;
87 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
88}
89
90/*
91 * Update the record referred to by cur to the value given
92 * by [bno, len, owner, offset].
93 * This either works (return 0) or gets an EFSCORRUPTED error.
94 */
95STATIC int
96xfs_rmap_update(
97 struct xfs_btree_cur *cur,
98 struct xfs_rmap_irec *irec)
99{
100 union xfs_btree_rec rec;
101 int error;
102
103 trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.pag->pag_agno,
104 irec->rm_startblock, irec->rm_blockcount,
105 irec->rm_owner, irec->rm_offset, irec->rm_flags);
106
107 rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
108 rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
109 rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
110 rec.rmap.rm_offset = cpu_to_be64(
111 xfs_rmap_irec_offset_pack(irec));
112 error = xfs_btree_update(cur, &rec);
113 if (error)
114 trace_xfs_rmap_update_error(cur->bc_mp,
115 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
116 return error;
117}
118
119int
120xfs_rmap_insert(
121 struct xfs_btree_cur *rcur,
122 xfs_agblock_t agbno,
123 xfs_extlen_t len,
124 uint64_t owner,
125 uint64_t offset,
126 unsigned int flags)
127{
128 int i;
129 int error;
130
131 trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
132 len, owner, offset, flags);
133
134 error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
135 if (error)
136 goto done;
137 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 0)) {
138 xfs_btree_mark_sick(cur: rcur);
139 error = -EFSCORRUPTED;
140 goto done;
141 }
142
143 rcur->bc_rec.r.rm_startblock = agbno;
144 rcur->bc_rec.r.rm_blockcount = len;
145 rcur->bc_rec.r.rm_owner = owner;
146 rcur->bc_rec.r.rm_offset = offset;
147 rcur->bc_rec.r.rm_flags = flags;
148 error = xfs_btree_insert(rcur, &i);
149 if (error)
150 goto done;
151 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
152 xfs_btree_mark_sick(cur: rcur);
153 error = -EFSCORRUPTED;
154 goto done;
155 }
156done:
157 if (error)
158 trace_xfs_rmap_insert_error(rcur->bc_mp,
159 rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
160 return error;
161}
162
163STATIC int
164xfs_rmap_delete(
165 struct xfs_btree_cur *rcur,
166 xfs_agblock_t agbno,
167 xfs_extlen_t len,
168 uint64_t owner,
169 uint64_t offset,
170 unsigned int flags)
171{
172 int i;
173 int error;
174
175 trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
176 len, owner, offset, flags);
177
178 error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
179 if (error)
180 goto done;
181 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
182 xfs_btree_mark_sick(cur: rcur);
183 error = -EFSCORRUPTED;
184 goto done;
185 }
186
187 error = xfs_btree_delete(rcur, &i);
188 if (error)
189 goto done;
190 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
191 xfs_btree_mark_sick(cur: rcur);
192 error = -EFSCORRUPTED;
193 goto done;
194 }
195done:
196 if (error)
197 trace_xfs_rmap_delete_error(rcur->bc_mp,
198 rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
199 return error;
200}
201
202/* Convert an internal btree record to an rmap record. */
203xfs_failaddr_t
204xfs_rmap_btrec_to_irec(
205 const union xfs_btree_rec *rec,
206 struct xfs_rmap_irec *irec)
207{
208 irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
209 irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
210 irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
211 return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
212 irec);
213}
214
215/* Simple checks for rmap records. */
216xfs_failaddr_t
217xfs_rmap_check_irec(
218 struct xfs_perag *pag,
219 const struct xfs_rmap_irec *irec)
220{
221 struct xfs_mount *mp = pag->pag_mount;
222 bool is_inode;
223 bool is_unwritten;
224 bool is_bmbt;
225 bool is_attr;
226
227 if (irec->rm_blockcount == 0)
228 return __this_address;
229 if (irec->rm_startblock <= XFS_AGFL_BLOCK(mp)) {
230 if (irec->rm_owner != XFS_RMAP_OWN_FS)
231 return __this_address;
232 if (irec->rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
233 return __this_address;
234 } else {
235 /* check for valid extent range, including overflow */
236 if (!xfs_verify_agbext(pag, irec->rm_startblock,
237 irec->rm_blockcount))
238 return __this_address;
239 }
240
241 if (!(xfs_verify_ino(mp, irec->rm_owner) ||
242 (irec->rm_owner <= XFS_RMAP_OWN_FS &&
243 irec->rm_owner >= XFS_RMAP_OWN_MIN)))
244 return __this_address;
245
246 /* Check flags. */
247 is_inode = !XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
248 is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
249 is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
250 is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;
251
252 if (is_bmbt && irec->rm_offset != 0)
253 return __this_address;
254
255 if (!is_inode && irec->rm_offset != 0)
256 return __this_address;
257
258 if (is_unwritten && (is_bmbt || !is_inode || is_attr))
259 return __this_address;
260
261 if (!is_inode && (is_bmbt || is_unwritten || is_attr))
262 return __this_address;
263
264 /* Check for a valid fork offset, if applicable. */
265 if (is_inode && !is_bmbt &&
266 !xfs_verify_fileext(mp, irec->rm_offset, irec->rm_blockcount))
267 return __this_address;
268
269 return NULL;
270}
271
272static inline xfs_failaddr_t
273xfs_rmap_check_btrec(
274 struct xfs_btree_cur *cur,
275 const struct xfs_rmap_irec *irec)
276{
277 if (xfs_btree_is_mem_rmap(cur->bc_ops))
278 return xfs_rmap_check_irec(cur->bc_mem.pag, irec);
279 return xfs_rmap_check_irec(cur->bc_ag.pag, irec);
280}
281
282static inline int
283xfs_rmap_complain_bad_rec(
284 struct xfs_btree_cur *cur,
285 xfs_failaddr_t fa,
286 const struct xfs_rmap_irec *irec)
287{
288 struct xfs_mount *mp = cur->bc_mp;
289
290 if (xfs_btree_is_mem_rmap(cur->bc_ops))
291 xfs_warn(mp,
292 "In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa);
293 else
294 xfs_warn(mp,
295 "Reverse Mapping BTree record corruption in AG %d detected at %pS!",
296 cur->bc_ag.pag->pag_agno, fa);
297 xfs_warn(mp,
298 "Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x",
299 irec->rm_owner, irec->rm_flags, irec->rm_startblock,
300 irec->rm_blockcount);
301 xfs_btree_mark_sick(cur);
302 return -EFSCORRUPTED;
303}
304
305/*
306 * Get the data from the pointed-to record.
307 */
308int
309xfs_rmap_get_rec(
310 struct xfs_btree_cur *cur,
311 struct xfs_rmap_irec *irec,
312 int *stat)
313{
314 union xfs_btree_rec *rec;
315 xfs_failaddr_t fa;
316 int error;
317
318 error = xfs_btree_get_rec(cur, &rec, stat);
319 if (error || !*stat)
320 return error;
321
322 fa = xfs_rmap_btrec_to_irec(rec, irec);
323 if (!fa)
324 fa = xfs_rmap_check_btrec(cur, irec);
325 if (fa)
326 return xfs_rmap_complain_bad_rec(cur, fa, irec);
327
328 return 0;
329}
330
331struct xfs_find_left_neighbor_info {
332 struct xfs_rmap_irec high;
333 struct xfs_rmap_irec *irec;
334};
335
336/* For each rmap given, figure out if it matches the key we want. */
337STATIC int
338xfs_rmap_find_left_neighbor_helper(
339 struct xfs_btree_cur *cur,
340 const struct xfs_rmap_irec *rec,
341 void *priv)
342{
343 struct xfs_find_left_neighbor_info *info = priv;
344
345 trace_xfs_rmap_find_left_neighbor_candidate(cur->bc_mp,
346 cur->bc_ag.pag->pag_agno, rec->rm_startblock,
347 rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
348 rec->rm_flags);
349
350 if (rec->rm_owner != info->high.rm_owner)
351 return 0;
352 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
353 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
354 rec->rm_offset + rec->rm_blockcount - 1 != info->high.rm_offset)
355 return 0;
356
357 *info->irec = *rec;
358 return -ECANCELED;
359}
360
361/*
362 * Find the record to the left of the given extent, being careful only to
363 * return a match with the same owner and adjacent physical and logical
364 * block ranges.
365 */
366STATIC int
367xfs_rmap_find_left_neighbor(
368 struct xfs_btree_cur *cur,
369 xfs_agblock_t bno,
370 uint64_t owner,
371 uint64_t offset,
372 unsigned int flags,
373 struct xfs_rmap_irec *irec,
374 int *stat)
375{
376 struct xfs_find_left_neighbor_info info;
377 int found = 0;
378 int error;
379
380 *stat = 0;
381 if (bno == 0)
382 return 0;
383 info.high.rm_startblock = bno - 1;
384 info.high.rm_owner = owner;
385 if (!XFS_RMAP_NON_INODE_OWNER(owner) &&
386 !(flags & XFS_RMAP_BMBT_BLOCK)) {
387 if (offset == 0)
388 return 0;
389 info.high.rm_offset = offset - 1;
390 } else
391 info.high.rm_offset = 0;
392 info.high.rm_flags = flags;
393 info.high.rm_blockcount = 0;
394 info.irec = irec;
395
396 trace_xfs_rmap_find_left_neighbor_query(cur->bc_mp,
397 cur->bc_ag.pag->pag_agno, bno, 0, owner, offset, flags);
398
399 /*
400 * Historically, we always used the range query to walk every reverse
401 * mapping that could possibly overlap the key that the caller asked
402 * for, and filter out the ones that don't. That is very slow when
403 * there are a lot of records.
404 *
405 * However, there are two scenarios where the classic btree search can
406 * produce correct results -- if the index contains a record that is an
407 * exact match for the lookup key; and if there are no other records
408 * between the record we want and the key we supplied.
409 *
410 * As an optimization, try a non-overlapped lookup first. This makes
411 * extent conversion and remap operations run a bit faster if the
412 * physical extents aren't being shared. If we don't find what we
413 * want, we fall back to the overlapped query.
414 */
415 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
416 &found);
417 if (error)
418 return error;
419 if (found)
420 error = xfs_rmap_find_left_neighbor_helper(cur, irec, &info);
421 if (!error)
422 error = xfs_rmap_query_range(cur, low_rec: &info.high, high_rec: &info.high,
423 fn: xfs_rmap_find_left_neighbor_helper, priv: &info);
424 if (error != -ECANCELED)
425 return error;
426
427 *stat = 1;
428 trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
429 cur->bc_ag.pag->pag_agno, irec->rm_startblock,
430 irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
431 irec->rm_flags);
432 return 0;
433}
434
435/* For each rmap given, figure out if it matches the key we want. */
436STATIC int
437xfs_rmap_lookup_le_range_helper(
438 struct xfs_btree_cur *cur,
439 const struct xfs_rmap_irec *rec,
440 void *priv)
441{
442 struct xfs_find_left_neighbor_info *info = priv;
443
444 trace_xfs_rmap_lookup_le_range_candidate(cur->bc_mp,
445 cur->bc_ag.pag->pag_agno, rec->rm_startblock,
446 rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
447 rec->rm_flags);
448
449 if (rec->rm_owner != info->high.rm_owner)
450 return 0;
451 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
452 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
453 (rec->rm_offset > info->high.rm_offset ||
454 rec->rm_offset + rec->rm_blockcount <= info->high.rm_offset))
455 return 0;
456
457 *info->irec = *rec;
458 return -ECANCELED;
459}
460
461/*
462 * Find the record to the left of the given extent, being careful only to
463 * return a match with the same owner and overlapping physical and logical
464 * block ranges. This is the overlapping-interval version of
465 * xfs_rmap_lookup_le.
466 */
467int
468xfs_rmap_lookup_le_range(
469 struct xfs_btree_cur *cur,
470 xfs_agblock_t bno,
471 uint64_t owner,
472 uint64_t offset,
473 unsigned int flags,
474 struct xfs_rmap_irec *irec,
475 int *stat)
476{
477 struct xfs_find_left_neighbor_info info;
478 int found = 0;
479 int error;
480
481 info.high.rm_startblock = bno;
482 info.high.rm_owner = owner;
483 if (!XFS_RMAP_NON_INODE_OWNER(owner) && !(flags & XFS_RMAP_BMBT_BLOCK))
484 info.high.rm_offset = offset;
485 else
486 info.high.rm_offset = 0;
487 info.high.rm_flags = flags;
488 info.high.rm_blockcount = 0;
489 *stat = 0;
490 info.irec = irec;
491
492 trace_xfs_rmap_lookup_le_range(cur->bc_mp, cur->bc_ag.pag->pag_agno,
493 bno, 0, owner, offset, flags);
494
495 /*
496 * Historically, we always used the range query to walk every reverse
497 * mapping that could possibly overlap the key that the caller asked
498 * for, and filter out the ones that don't. That is very slow when
499 * there are a lot of records.
500 *
501 * However, there are two scenarios where the classic btree search can
502 * produce correct results -- if the index contains a record that is an
503 * exact match for the lookup key; and if there are no other records
504 * between the record we want and the key we supplied.
505 *
506 * As an optimization, try a non-overlapped lookup first. This makes
507 * scrub run much faster on most filesystems because bmbt records are
508 * usually an exact match for rmap records. If we don't find what we
509 * want, we fall back to the overlapped query.
510 */
511 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
512 &found);
513 if (error)
514 return error;
515 if (found)
516 error = xfs_rmap_lookup_le_range_helper(cur, irec, &info);
517 if (!error)
518 error = xfs_rmap_query_range(cur, low_rec: &info.high, high_rec: &info.high,
519 fn: xfs_rmap_lookup_le_range_helper, priv: &info);
520 if (error != -ECANCELED)
521 return error;
522
523 *stat = 1;
524 trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
525 cur->bc_ag.pag->pag_agno, irec->rm_startblock,
526 irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
527 irec->rm_flags);
528 return 0;
529}
530
531/*
532 * Perform all the relevant owner checks for a removal op. If we're doing an
533 * unknown-owner removal then we have no owner information to check.
534 */
535static int
536xfs_rmap_free_check_owner(
537 struct xfs_btree_cur *cur,
538 uint64_t ltoff,
539 struct xfs_rmap_irec *rec,
540 xfs_filblks_t len,
541 uint64_t owner,
542 uint64_t offset,
543 unsigned int flags)
544{
545 struct xfs_mount *mp = cur->bc_mp;
546 int error = 0;
547
548 if (owner == XFS_RMAP_OWN_UNKNOWN)
549 return 0;
550
551 /* Make sure the unwritten flag matches. */
552 if (XFS_IS_CORRUPT(mp,
553 (flags & XFS_RMAP_UNWRITTEN) !=
554 (rec->rm_flags & XFS_RMAP_UNWRITTEN))) {
555 xfs_btree_mark_sick(cur);
556 error = -EFSCORRUPTED;
557 goto out;
558 }
559
560 /* Make sure the owner matches what we expect to find in the tree. */
561 if (XFS_IS_CORRUPT(mp, owner != rec->rm_owner)) {
562 xfs_btree_mark_sick(cur);
563 error = -EFSCORRUPTED;
564 goto out;
565 }
566
567 /* Check the offset, if necessary. */
568 if (XFS_RMAP_NON_INODE_OWNER(owner))
569 goto out;
570
571 if (flags & XFS_RMAP_BMBT_BLOCK) {
572 if (XFS_IS_CORRUPT(mp,
573 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK))) {
574 xfs_btree_mark_sick(cur);
575 error = -EFSCORRUPTED;
576 goto out;
577 }
578 } else {
579 if (XFS_IS_CORRUPT(mp, rec->rm_offset > offset)) {
580 xfs_btree_mark_sick(cur);
581 error = -EFSCORRUPTED;
582 goto out;
583 }
584 if (XFS_IS_CORRUPT(mp,
585 offset + len > ltoff + rec->rm_blockcount)) {
586 xfs_btree_mark_sick(cur);
587 error = -EFSCORRUPTED;
588 goto out;
589 }
590 }
591
592out:
593 return error;
594}
595
596/*
597 * Find the extent in the rmap btree and remove it.
598 *
599 * The record we find should always be an exact match for the extent that we're
600 * looking for, since we insert them into the btree without modification.
601 *
602 * Special Case #1: when growing the filesystem, we "free" an extent when
603 * growing the last AG. This extent is new space and so it is not tracked as
604 * used space in the btree. The growfs code will pass in an owner of
605 * XFS_RMAP_OWN_NULL to indicate that it expected that there is no owner of this
606 * extent. We verify that - the extent lookup result in a record that does not
607 * overlap.
608 *
609 * Special Case #2: EFIs do not record the owner of the extent, so when
610 * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap
611 * btree to ignore the owner (i.e. wildcard match) so we don't trigger
612 * corruption checks during log recovery.
613 */
614STATIC int
615xfs_rmap_unmap(
616 struct xfs_btree_cur *cur,
617 xfs_agblock_t bno,
618 xfs_extlen_t len,
619 bool unwritten,
620 const struct xfs_owner_info *oinfo)
621{
622 struct xfs_mount *mp = cur->bc_mp;
623 struct xfs_rmap_irec ltrec;
624 uint64_t ltoff;
625 int error = 0;
626 int i;
627 uint64_t owner;
628 uint64_t offset;
629 unsigned int flags;
630 bool ignore_off;
631
632 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
633 ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
634 (flags & XFS_RMAP_BMBT_BLOCK);
635 if (unwritten)
636 flags |= XFS_RMAP_UNWRITTEN;
637 trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
638 unwritten, oinfo);
639
640 /*
641 * We should always have a left record because there's a static record
642 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
643 * will not ever be removed from the tree.
644 */
645 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, &ltrec, &i);
646 if (error)
647 goto out_error;
648 if (XFS_IS_CORRUPT(mp, i != 1)) {
649 xfs_btree_mark_sick(cur);
650 error = -EFSCORRUPTED;
651 goto out_error;
652 }
653
654 trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
655 cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
656 ltrec.rm_blockcount, ltrec.rm_owner,
657 ltrec.rm_offset, ltrec.rm_flags);
658 ltoff = ltrec.rm_offset;
659
660 /*
661 * For growfs, the incoming extent must be beyond the left record we
662 * just found as it is new space and won't be used by anyone. This is
663 * just a corruption check as we don't actually do anything with this
664 * extent. Note that we need to use >= instead of > because it might
665 * be the case that the "left" extent goes all the way to EOFS.
666 */
667 if (owner == XFS_RMAP_OWN_NULL) {
668 if (XFS_IS_CORRUPT(mp,
669 bno <
670 ltrec.rm_startblock + ltrec.rm_blockcount)) {
671 xfs_btree_mark_sick(cur);
672 error = -EFSCORRUPTED;
673 goto out_error;
674 }
675 goto out_done;
676 }
677
678 /*
679 * If we're doing an unknown-owner removal for EFI recovery, we expect
680 * to find the full range in the rmapbt or nothing at all. If we
681 * don't find any rmaps overlapping either end of the range, we're
682 * done. Hopefully this means that the EFI creator already queued
683 * (and finished) a RUI to remove the rmap.
684 */
685 if (owner == XFS_RMAP_OWN_UNKNOWN &&
686 ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
687 struct xfs_rmap_irec rtrec;
688
689 error = xfs_btree_increment(cur, 0, &i);
690 if (error)
691 goto out_error;
692 if (i == 0)
693 goto out_done;
694 error = xfs_rmap_get_rec(cur, irec: &rtrec, stat: &i);
695 if (error)
696 goto out_error;
697 if (XFS_IS_CORRUPT(mp, i != 1)) {
698 xfs_btree_mark_sick(cur);
699 error = -EFSCORRUPTED;
700 goto out_error;
701 }
702 if (rtrec.rm_startblock >= bno + len)
703 goto out_done;
704 }
705
706 /* Make sure the extent we found covers the entire freeing range. */
707 if (XFS_IS_CORRUPT(mp,
708 ltrec.rm_startblock > bno ||
709 ltrec.rm_startblock + ltrec.rm_blockcount <
710 bno + len)) {
711 xfs_btree_mark_sick(cur);
712 error = -EFSCORRUPTED;
713 goto out_error;
714 }
715
716 /* Check owner information. */
717 error = xfs_rmap_free_check_owner(cur, ltoff, &ltrec, len, owner,
718 offset, flags);
719 if (error)
720 goto out_error;
721
722 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
723 /* exact match, simply remove the record from rmap tree */
724 trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
725 ltrec.rm_startblock, ltrec.rm_blockcount,
726 ltrec.rm_owner, ltrec.rm_offset,
727 ltrec.rm_flags);
728 error = xfs_btree_delete(cur, &i);
729 if (error)
730 goto out_error;
731 if (XFS_IS_CORRUPT(mp, i != 1)) {
732 xfs_btree_mark_sick(cur);
733 error = -EFSCORRUPTED;
734 goto out_error;
735 }
736 } else if (ltrec.rm_startblock == bno) {
737 /*
738 * overlap left hand side of extent: move the start, trim the
739 * length and update the current record.
740 *
741 * ltbno ltlen
742 * Orig: |oooooooooooooooooooo|
743 * Freeing: |fffffffff|
744 * Result: |rrrrrrrrrr|
745 * bno len
746 */
747 ltrec.rm_startblock += len;
748 ltrec.rm_blockcount -= len;
749 if (!ignore_off)
750 ltrec.rm_offset += len;
751 error = xfs_rmap_update(cur, &ltrec);
752 if (error)
753 goto out_error;
754 } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
755 /*
756 * overlap right hand side of extent: trim the length and update
757 * the current record.
758 *
759 * ltbno ltlen
760 * Orig: |oooooooooooooooooooo|
761 * Freeing: |fffffffff|
762 * Result: |rrrrrrrrrr|
763 * bno len
764 */
765 ltrec.rm_blockcount -= len;
766 error = xfs_rmap_update(cur, &ltrec);
767 if (error)
768 goto out_error;
769 } else {
770
771 /*
772 * overlap middle of extent: trim the length of the existing
773 * record to the length of the new left-extent size, increment
774 * the insertion position so we can insert a new record
775 * containing the remaining right-extent space.
776 *
777 * ltbno ltlen
778 * Orig: |oooooooooooooooooooo|
779 * Freeing: |fffffffff|
780 * Result: |rrrrr| |rrrr|
781 * bno len
782 */
783 xfs_extlen_t orig_len = ltrec.rm_blockcount;
784
785 ltrec.rm_blockcount = bno - ltrec.rm_startblock;
786 error = xfs_rmap_update(cur, &ltrec);
787 if (error)
788 goto out_error;
789
790 error = xfs_btree_increment(cur, 0, &i);
791 if (error)
792 goto out_error;
793
794 cur->bc_rec.r.rm_startblock = bno + len;
795 cur->bc_rec.r.rm_blockcount = orig_len - len -
796 ltrec.rm_blockcount;
797 cur->bc_rec.r.rm_owner = ltrec.rm_owner;
798 if (ignore_off)
799 cur->bc_rec.r.rm_offset = 0;
800 else
801 cur->bc_rec.r.rm_offset = offset + len;
802 cur->bc_rec.r.rm_flags = flags;
803 trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
804 cur->bc_rec.r.rm_startblock,
805 cur->bc_rec.r.rm_blockcount,
806 cur->bc_rec.r.rm_owner,
807 cur->bc_rec.r.rm_offset,
808 cur->bc_rec.r.rm_flags);
809 error = xfs_btree_insert(cur, &i);
810 if (error)
811 goto out_error;
812 }
813
814out_done:
815 trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
816 unwritten, oinfo);
817out_error:
818 if (error)
819 trace_xfs_rmap_unmap_error(mp, cur->bc_ag.pag->pag_agno,
820 error, _RET_IP_);
821 return error;
822}
823
824#ifdef CONFIG_XFS_LIVE_HOOKS
825/*
826 * Use a static key here to reduce the overhead of rmapbt live updates. If
827 * the compiler supports jump labels, the static branch will be replaced by a
828 * nop sled when there are no hook users. Online fsck is currently the only
829 * caller, so this is a reasonable tradeoff.
830 *
831 * Note: Patching the kernel code requires taking the cpu hotplug lock. Other
832 * parts of the kernel allocate memory with that lock held, which means that
833 * XFS callers cannot hold any locks that might be used by memory reclaim or
834 * writeback when calling the static_branch_{inc,dec} functions.
835 */
836DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_rmap_hooks_switch);
837
838void
839xfs_rmap_hook_disable(void)
840{
841 xfs_hooks_switch_off(&xfs_rmap_hooks_switch);
842}
843
844void
845xfs_rmap_hook_enable(void)
846{
847 xfs_hooks_switch_on(&xfs_rmap_hooks_switch);
848}
849
850/* Call downstream hooks for a reverse mapping update. */
851static inline void
852xfs_rmap_update_hook(
853 struct xfs_trans *tp,
854 struct xfs_perag *pag,
855 enum xfs_rmap_intent_type op,
856 xfs_agblock_t startblock,
857 xfs_extlen_t blockcount,
858 bool unwritten,
859 const struct xfs_owner_info *oinfo)
860{
861 if (xfs_hooks_switched_on(&xfs_rmap_hooks_switch)) {
862 struct xfs_rmap_update_params p = {
863 .startblock = startblock,
864 .blockcount = blockcount,
865 .unwritten = unwritten,
866 .oinfo = *oinfo, /* struct copy */
867 };
868
869 if (pag)
870 xfs_hooks_call(&pag->pag_rmap_update_hooks, op, &p);
871 }
872}
873
874/* Call the specified function during a reverse mapping update. */
875int
876xfs_rmap_hook_add(
877 struct xfs_perag *pag,
878 struct xfs_rmap_hook *hook)
879{
880 return xfs_hooks_add(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
881}
882
883/* Stop calling the specified function during a reverse mapping update. */
884void
885xfs_rmap_hook_del(
886 struct xfs_perag *pag,
887 struct xfs_rmap_hook *hook)
888{
889 xfs_hooks_del(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
890}
891
892/* Configure rmap update hook functions. */
893void
894xfs_rmap_hook_setup(
895 struct xfs_rmap_hook *hook,
896 notifier_fn_t mod_fn)
897{
898 xfs_hook_setup(&hook->rmap_hook, mod_fn);
899}
900#else
901# define xfs_rmap_update_hook(t, p, o, s, b, u, oi) do { } while (0)
902#endif /* CONFIG_XFS_LIVE_HOOKS */
903
904/*
905 * Remove a reference to an extent in the rmap btree.
906 */
907int
908xfs_rmap_free(
909 struct xfs_trans *tp,
910 struct xfs_buf *agbp,
911 struct xfs_perag *pag,
912 xfs_agblock_t bno,
913 xfs_extlen_t len,
914 const struct xfs_owner_info *oinfo)
915{
916 struct xfs_mount *mp = tp->t_mountp;
917 struct xfs_btree_cur *cur;
918 int error;
919
920 if (!xfs_has_rmapbt(mp))
921 return 0;
922
923 cur = xfs_rmapbt_init_cursor(mp, tp, bp: agbp, pag);
924 xfs_rmap_update_hook(tp, pag, XFS_RMAP_UNMAP, bno, len, false, oinfo);
925 error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
926
927 xfs_btree_del_cursor(cur, error);
928 return error;
929}
930
931/*
932 * A mergeable rmap must have the same owner and the same values for
933 * the unwritten, attr_fork, and bmbt flags. The startblock and
934 * offset are checked separately.
935 */
936static bool
937xfs_rmap_is_mergeable(
938 struct xfs_rmap_irec *irec,
939 uint64_t owner,
940 unsigned int flags)
941{
942 if (irec->rm_owner == XFS_RMAP_OWN_NULL)
943 return false;
944 if (irec->rm_owner != owner)
945 return false;
946 if ((flags & XFS_RMAP_UNWRITTEN) ^
947 (irec->rm_flags & XFS_RMAP_UNWRITTEN))
948 return false;
949 if ((flags & XFS_RMAP_ATTR_FORK) ^
950 (irec->rm_flags & XFS_RMAP_ATTR_FORK))
951 return false;
952 if ((flags & XFS_RMAP_BMBT_BLOCK) ^
953 (irec->rm_flags & XFS_RMAP_BMBT_BLOCK))
954 return false;
955 return true;
956}
957
958/*
959 * When we allocate a new block, the first thing we do is add a reference to
960 * the extent in the rmap btree. This takes the form of a [agbno, length,
961 * owner, offset] record. Flags are encoded in the high bits of the offset
962 * field.
963 */
964STATIC int
965xfs_rmap_map(
966 struct xfs_btree_cur *cur,
967 xfs_agblock_t bno,
968 xfs_extlen_t len,
969 bool unwritten,
970 const struct xfs_owner_info *oinfo)
971{
972 struct xfs_mount *mp = cur->bc_mp;
973 struct xfs_rmap_irec ltrec;
974 struct xfs_rmap_irec gtrec;
975 int have_gt;
976 int have_lt;
977 int error = 0;
978 int i;
979 uint64_t owner;
980 uint64_t offset;
981 unsigned int flags = 0;
982 bool ignore_off;
983
984 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
985 ASSERT(owner != 0);
986 ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
987 (flags & XFS_RMAP_BMBT_BLOCK);
988 if (unwritten)
989 flags |= XFS_RMAP_UNWRITTEN;
990 trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
991 unwritten, oinfo);
992 ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
993
994 /*
995 * For the initial lookup, look for an exact match or the left-adjacent
996 * record for our insertion point. This will also give us the record for
997 * start block contiguity tests.
998 */
999 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, &ltrec,
1000 &have_lt);
1001 if (error)
1002 goto out_error;
1003 if (have_lt) {
1004 trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
1005 cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
1006 ltrec.rm_blockcount, ltrec.rm_owner,
1007 ltrec.rm_offset, ltrec.rm_flags);
1008
1009 if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
1010 have_lt = 0;
1011 }
1012
1013 if (XFS_IS_CORRUPT(mp,
1014 have_lt != 0 &&
1015 ltrec.rm_startblock + ltrec.rm_blockcount > bno)) {
1016 xfs_btree_mark_sick(cur);
1017 error = -EFSCORRUPTED;
1018 goto out_error;
1019 }
1020
1021 /*
1022 * Increment the cursor to see if we have a right-adjacent record to our
1023 * insertion point. This will give us the record for end block
1024 * contiguity tests.
1025 */
1026 error = xfs_btree_increment(cur, 0, &have_gt);
1027 if (error)
1028 goto out_error;
1029 if (have_gt) {
1030 error = xfs_rmap_get_rec(cur, irec: &gtrec, stat: &have_gt);
1031 if (error)
1032 goto out_error;
1033 if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
1034 xfs_btree_mark_sick(cur);
1035 error = -EFSCORRUPTED;
1036 goto out_error;
1037 }
1038 if (XFS_IS_CORRUPT(mp, bno + len > gtrec.rm_startblock)) {
1039 xfs_btree_mark_sick(cur);
1040 error = -EFSCORRUPTED;
1041 goto out_error;
1042 }
1043 trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
1044 cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
1045 gtrec.rm_blockcount, gtrec.rm_owner,
1046 gtrec.rm_offset, gtrec.rm_flags);
1047 if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
1048 have_gt = 0;
1049 }
1050
1051 /*
1052 * Note: cursor currently points one record to the right of ltrec, even
1053 * if there is no record in the tree to the right.
1054 */
1055 if (have_lt &&
1056 ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
1057 (ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) {
1058 /*
1059 * left edge contiguous, merge into left record.
1060 *
1061 * ltbno ltlen
1062 * orig: |ooooooooo|
1063 * adding: |aaaaaaaaa|
1064 * result: |rrrrrrrrrrrrrrrrrrr|
1065 * bno len
1066 */
1067 ltrec.rm_blockcount += len;
1068 if (have_gt &&
1069 bno + len == gtrec.rm_startblock &&
1070 (ignore_off || offset + len == gtrec.rm_offset) &&
1071 (unsigned long)ltrec.rm_blockcount + len +
1072 gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) {
1073 /*
1074 * right edge also contiguous, delete right record
1075 * and merge into left record.
1076 *
1077 * ltbno ltlen gtbno gtlen
1078 * orig: |ooooooooo| |ooooooooo|
1079 * adding: |aaaaaaaaa|
1080 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
1081 */
1082 ltrec.rm_blockcount += gtrec.rm_blockcount;
1083 trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
1084 gtrec.rm_startblock,
1085 gtrec.rm_blockcount,
1086 gtrec.rm_owner,
1087 gtrec.rm_offset,
1088 gtrec.rm_flags);
1089 error = xfs_btree_delete(cur, &i);
1090 if (error)
1091 goto out_error;
1092 if (XFS_IS_CORRUPT(mp, i != 1)) {
1093 xfs_btree_mark_sick(cur);
1094 error = -EFSCORRUPTED;
1095 goto out_error;
1096 }
1097 }
1098
1099 /* point the cursor back to the left record and update */
1100 error = xfs_btree_decrement(cur, 0, &have_gt);
1101 if (error)
1102 goto out_error;
1103 error = xfs_rmap_update(cur, &ltrec);
1104 if (error)
1105 goto out_error;
1106 } else if (have_gt &&
1107 bno + len == gtrec.rm_startblock &&
1108 (ignore_off || offset + len == gtrec.rm_offset)) {
1109 /*
1110 * right edge contiguous, merge into right record.
1111 *
1112 * gtbno gtlen
1113 * Orig: |ooooooooo|
1114 * adding: |aaaaaaaaa|
1115 * Result: |rrrrrrrrrrrrrrrrrrr|
1116 * bno len
1117 */
1118 gtrec.rm_startblock = bno;
1119 gtrec.rm_blockcount += len;
1120 if (!ignore_off)
1121 gtrec.rm_offset = offset;
1122 error = xfs_rmap_update(cur, &gtrec);
1123 if (error)
1124 goto out_error;
1125 } else {
1126 /*
1127 * no contiguous edge with identical owner, insert
1128 * new record at current cursor position.
1129 */
1130 cur->bc_rec.r.rm_startblock = bno;
1131 cur->bc_rec.r.rm_blockcount = len;
1132 cur->bc_rec.r.rm_owner = owner;
1133 cur->bc_rec.r.rm_offset = offset;
1134 cur->bc_rec.r.rm_flags = flags;
1135 trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
1136 owner, offset, flags);
1137 error = xfs_btree_insert(cur, &i);
1138 if (error)
1139 goto out_error;
1140 if (XFS_IS_CORRUPT(mp, i != 1)) {
1141 xfs_btree_mark_sick(cur);
1142 error = -EFSCORRUPTED;
1143 goto out_error;
1144 }
1145 }
1146
1147 trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
1148 unwritten, oinfo);
1149out_error:
1150 if (error)
1151 trace_xfs_rmap_map_error(mp, cur->bc_ag.pag->pag_agno,
1152 error, _RET_IP_);
1153 return error;
1154}
1155
1156/*
1157 * Add a reference to an extent in the rmap btree.
1158 */
1159int
1160xfs_rmap_alloc(
1161 struct xfs_trans *tp,
1162 struct xfs_buf *agbp,
1163 struct xfs_perag *pag,
1164 xfs_agblock_t bno,
1165 xfs_extlen_t len,
1166 const struct xfs_owner_info *oinfo)
1167{
1168 struct xfs_mount *mp = tp->t_mountp;
1169 struct xfs_btree_cur *cur;
1170 int error;
1171
1172 if (!xfs_has_rmapbt(mp))
1173 return 0;
1174
1175 cur = xfs_rmapbt_init_cursor(mp, tp, bp: agbp, pag);
1176 xfs_rmap_update_hook(tp, pag, XFS_RMAP_MAP, bno, len, false, oinfo);
1177 error = xfs_rmap_map(cur, bno, len, false, oinfo);
1178
1179 xfs_btree_del_cursor(cur, error);
1180 return error;
1181}
1182
1183#define RMAP_LEFT_CONTIG (1 << 0)
1184#define RMAP_RIGHT_CONTIG (1 << 1)
1185#define RMAP_LEFT_FILLING (1 << 2)
1186#define RMAP_RIGHT_FILLING (1 << 3)
1187#define RMAP_LEFT_VALID (1 << 6)
1188#define RMAP_RIGHT_VALID (1 << 7)
1189
1190#define LEFT r[0]
1191#define RIGHT r[1]
1192#define PREV r[2]
1193#define NEW r[3]
1194
1195/*
1196 * Convert an unwritten extent to a real extent or vice versa.
1197 * Does not handle overlapping extents.
1198 */
1199STATIC int
1200xfs_rmap_convert(
1201 struct xfs_btree_cur *cur,
1202 xfs_agblock_t bno,
1203 xfs_extlen_t len,
1204 bool unwritten,
1205 const struct xfs_owner_info *oinfo)
1206{
1207 struct xfs_mount *mp = cur->bc_mp;
1208 struct xfs_rmap_irec r[4]; /* neighbor extent entries */
1209 /* left is 0, right is 1, */
1210 /* prev is 2, new is 3 */
1211 uint64_t owner;
1212 uint64_t offset;
1213 uint64_t new_endoff;
1214 unsigned int oldext;
1215 unsigned int newext;
1216 unsigned int flags = 0;
1217 int i;
1218 int state = 0;
1219 int error;
1220
1221 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
1222 ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
1223 (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
1224 oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
1225 new_endoff = offset + len;
1226 trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
1227 unwritten, oinfo);
1228
1229 /*
1230 * For the initial lookup, look for an exact match or the left-adjacent
1231 * record for our insertion point. This will also give us the record for
1232 * start block contiguity tests.
1233 */
1234 error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, &PREV, &i);
1235 if (error)
1236 goto done;
1237 if (XFS_IS_CORRUPT(mp, i != 1)) {
1238 xfs_btree_mark_sick(cur);
1239 error = -EFSCORRUPTED;
1240 goto done;
1241 }
1242
1243 trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
1244 cur->bc_ag.pag->pag_agno, PREV.rm_startblock,
1245 PREV.rm_blockcount, PREV.rm_owner,
1246 PREV.rm_offset, PREV.rm_flags);
1247
1248 ASSERT(PREV.rm_offset <= offset);
1249 ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
1250 ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
1251 newext = ~oldext & XFS_RMAP_UNWRITTEN;
1252
1253 /*
1254 * Set flags determining what part of the previous oldext allocation
1255 * extent is being replaced by a newext allocation.
1256 */
1257 if (PREV.rm_offset == offset)
1258 state |= RMAP_LEFT_FILLING;
1259 if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
1260 state |= RMAP_RIGHT_FILLING;
1261
1262 /*
1263 * Decrement the cursor to see if we have a left-adjacent record to our
1264 * insertion point. This will give us the record for end block
1265 * contiguity tests.
1266 */
1267 error = xfs_btree_decrement(cur, 0, &i);
1268 if (error)
1269 goto done;
1270 if (i) {
1271 state |= RMAP_LEFT_VALID;
1272 error = xfs_rmap_get_rec(cur, irec: &LEFT, stat: &i);
1273 if (error)
1274 goto done;
1275 if (XFS_IS_CORRUPT(mp, i != 1)) {
1276 xfs_btree_mark_sick(cur);
1277 error = -EFSCORRUPTED;
1278 goto done;
1279 }
1280 if (XFS_IS_CORRUPT(mp,
1281 LEFT.rm_startblock + LEFT.rm_blockcount >
1282 bno)) {
1283 xfs_btree_mark_sick(cur);
1284 error = -EFSCORRUPTED;
1285 goto done;
1286 }
1287 trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
1288 cur->bc_ag.pag->pag_agno, LEFT.rm_startblock,
1289 LEFT.rm_blockcount, LEFT.rm_owner,
1290 LEFT.rm_offset, LEFT.rm_flags);
1291 if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
1292 LEFT.rm_offset + LEFT.rm_blockcount == offset &&
1293 xfs_rmap_is_mergeable(&LEFT, owner, newext))
1294 state |= RMAP_LEFT_CONTIG;
1295 }
1296
1297 /*
1298 * Increment the cursor to see if we have a right-adjacent record to our
1299 * insertion point. This will give us the record for end block
1300 * contiguity tests.
1301 */
1302 error = xfs_btree_increment(cur, 0, &i);
1303 if (error)
1304 goto done;
1305 if (XFS_IS_CORRUPT(mp, i != 1)) {
1306 xfs_btree_mark_sick(cur);
1307 error = -EFSCORRUPTED;
1308 goto done;
1309 }
1310 error = xfs_btree_increment(cur, 0, &i);
1311 if (error)
1312 goto done;
1313 if (i) {
1314 state |= RMAP_RIGHT_VALID;
1315 error = xfs_rmap_get_rec(cur, irec: &RIGHT, stat: &i);
1316 if (error)
1317 goto done;
1318 if (XFS_IS_CORRUPT(mp, i != 1)) {
1319 xfs_btree_mark_sick(cur);
1320 error = -EFSCORRUPTED;
1321 goto done;
1322 }
1323 if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
1324 xfs_btree_mark_sick(cur);
1325 error = -EFSCORRUPTED;
1326 goto done;
1327 }
1328 trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
1329 cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
1330 RIGHT.rm_blockcount, RIGHT.rm_owner,
1331 RIGHT.rm_offset, RIGHT.rm_flags);
1332 if (bno + len == RIGHT.rm_startblock &&
1333 offset + len == RIGHT.rm_offset &&
1334 xfs_rmap_is_mergeable(&RIGHT, owner, newext))
1335 state |= RMAP_RIGHT_CONTIG;
1336 }
1337
1338 /* check that left + prev + right is not too long */
1339 if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1340 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
1341 (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1342 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
1343 (unsigned long)LEFT.rm_blockcount + len +
1344 RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
1345 state &= ~RMAP_RIGHT_CONTIG;
1346
1347 trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
1348 _RET_IP_);
1349
1350 /* reset the cursor back to PREV */
1351 error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, NULL, &i);
1352 if (error)
1353 goto done;
1354 if (XFS_IS_CORRUPT(mp, i != 1)) {
1355 xfs_btree_mark_sick(cur);
1356 error = -EFSCORRUPTED;
1357 goto done;
1358 }
1359
1360 /*
1361 * Switch out based on the FILLING and CONTIG state bits.
1362 */
1363 switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1364 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
1365 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1366 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1367 /*
1368 * Setting all of a previous oldext extent to newext.
1369 * The left and right neighbors are both contiguous with new.
1370 */
1371 error = xfs_btree_increment(cur, 0, &i);
1372 if (error)
1373 goto done;
1374 if (XFS_IS_CORRUPT(mp, i != 1)) {
1375 xfs_btree_mark_sick(cur);
1376 error = -EFSCORRUPTED;
1377 goto done;
1378 }
1379 trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
1380 RIGHT.rm_startblock, RIGHT.rm_blockcount,
1381 RIGHT.rm_owner, RIGHT.rm_offset,
1382 RIGHT.rm_flags);
1383 error = xfs_btree_delete(cur, &i);
1384 if (error)
1385 goto done;
1386 if (XFS_IS_CORRUPT(mp, i != 1)) {
1387 xfs_btree_mark_sick(cur);
1388 error = -EFSCORRUPTED;
1389 goto done;
1390 }
1391 error = xfs_btree_decrement(cur, 0, &i);
1392 if (error)
1393 goto done;
1394 if (XFS_IS_CORRUPT(mp, i != 1)) {
1395 xfs_btree_mark_sick(cur);
1396 error = -EFSCORRUPTED;
1397 goto done;
1398 }
1399 trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
1400 PREV.rm_startblock, PREV.rm_blockcount,
1401 PREV.rm_owner, PREV.rm_offset,
1402 PREV.rm_flags);
1403 error = xfs_btree_delete(cur, &i);
1404 if (error)
1405 goto done;
1406 if (XFS_IS_CORRUPT(mp, i != 1)) {
1407 xfs_btree_mark_sick(cur);
1408 error = -EFSCORRUPTED;
1409 goto done;
1410 }
1411 error = xfs_btree_decrement(cur, 0, &i);
1412 if (error)
1413 goto done;
1414 if (XFS_IS_CORRUPT(mp, i != 1)) {
1415 xfs_btree_mark_sick(cur);
1416 error = -EFSCORRUPTED;
1417 goto done;
1418 }
1419 NEW = LEFT;
1420 NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
1421 error = xfs_rmap_update(cur, &NEW);
1422 if (error)
1423 goto done;
1424 break;
1425
1426 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1427 /*
1428 * Setting all of a previous oldext extent to newext.
1429 * The left neighbor is contiguous, the right is not.
1430 */
1431 trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
1432 PREV.rm_startblock, PREV.rm_blockcount,
1433 PREV.rm_owner, PREV.rm_offset,
1434 PREV.rm_flags);
1435 error = xfs_btree_delete(cur, &i);
1436 if (error)
1437 goto done;
1438 if (XFS_IS_CORRUPT(mp, i != 1)) {
1439 xfs_btree_mark_sick(cur);
1440 error = -EFSCORRUPTED;
1441 goto done;
1442 }
1443 error = xfs_btree_decrement(cur, 0, &i);
1444 if (error)
1445 goto done;
1446 if (XFS_IS_CORRUPT(mp, i != 1)) {
1447 xfs_btree_mark_sick(cur);
1448 error = -EFSCORRUPTED;
1449 goto done;
1450 }
1451 NEW = LEFT;
1452 NEW.rm_blockcount += PREV.rm_blockcount;
1453 error = xfs_rmap_update(cur, &NEW);
1454 if (error)
1455 goto done;
1456 break;
1457
1458 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1459 /*
1460 * Setting all of a previous oldext extent to newext.
1461 * The right neighbor is contiguous, the left is not.
1462 */
1463 error = xfs_btree_increment(cur, 0, &i);
1464 if (error)
1465 goto done;
1466 if (XFS_IS_CORRUPT(mp, i != 1)) {
1467 xfs_btree_mark_sick(cur);
1468 error = -EFSCORRUPTED;
1469 goto done;
1470 }
1471 trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
1472 RIGHT.rm_startblock, RIGHT.rm_blockcount,
1473 RIGHT.rm_owner, RIGHT.rm_offset,
1474 RIGHT.rm_flags);
1475 error = xfs_btree_delete(cur, &i);
1476 if (error)
1477 goto done;
1478 if (XFS_IS_CORRUPT(mp, i != 1)) {
1479 xfs_btree_mark_sick(cur);
1480 error = -EFSCORRUPTED;
1481 goto done;
1482 }
1483 error = xfs_btree_decrement(cur, 0, &i);
1484 if (error)
1485 goto done;
1486 if (XFS_IS_CORRUPT(mp, i != 1)) {
1487 xfs_btree_mark_sick(cur);
1488 error = -EFSCORRUPTED;
1489 goto done;
1490 }
1491 NEW = PREV;
1492 NEW.rm_blockcount = len + RIGHT.rm_blockcount;
1493 NEW.rm_flags = newext;
1494 error = xfs_rmap_update(cur, &NEW);
1495 if (error)
1496 goto done;
1497 break;
1498
1499 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
1500 /*
1501 * Setting all of a previous oldext extent to newext.
1502 * Neither the left nor right neighbors are contiguous with
1503 * the new one.
1504 */
1505 NEW = PREV;
1506 NEW.rm_flags = newext;
1507 error = xfs_rmap_update(cur, &NEW);
1508 if (error)
1509 goto done;
1510 break;
1511
1512 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
1513 /*
1514 * Setting the first part of a previous oldext extent to newext.
1515 * The left neighbor is contiguous.
1516 */
1517 NEW = PREV;
1518 NEW.rm_offset += len;
1519 NEW.rm_startblock += len;
1520 NEW.rm_blockcount -= len;
1521 error = xfs_rmap_update(cur, &NEW);
1522 if (error)
1523 goto done;
1524 error = xfs_btree_decrement(cur, 0, &i);
1525 if (error)
1526 goto done;
1527 NEW = LEFT;
1528 NEW.rm_blockcount += len;
1529 error = xfs_rmap_update(cur, &NEW);
1530 if (error)
1531 goto done;
1532 break;
1533
1534 case RMAP_LEFT_FILLING:
1535 /*
1536 * Setting the first part of a previous oldext extent to newext.
1537 * The left neighbor is not contiguous.
1538 */
1539 NEW = PREV;
1540 NEW.rm_startblock += len;
1541 NEW.rm_offset += len;
1542 NEW.rm_blockcount -= len;
1543 error = xfs_rmap_update(cur, &NEW);
1544 if (error)
1545 goto done;
1546 NEW.rm_startblock = bno;
1547 NEW.rm_owner = owner;
1548 NEW.rm_offset = offset;
1549 NEW.rm_blockcount = len;
1550 NEW.rm_flags = newext;
1551 cur->bc_rec.r = NEW;
1552 trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
1553 len, owner, offset, newext);
1554 error = xfs_btree_insert(cur, &i);
1555 if (error)
1556 goto done;
1557 if (XFS_IS_CORRUPT(mp, i != 1)) {
1558 xfs_btree_mark_sick(cur);
1559 error = -EFSCORRUPTED;
1560 goto done;
1561 }
1562 break;
1563
1564 case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1565 /*
1566 * Setting the last part of a previous oldext extent to newext.
1567 * The right neighbor is contiguous with the new allocation.
1568 */
1569 NEW = PREV;
1570 NEW.rm_blockcount -= len;
1571 error = xfs_rmap_update(cur, &NEW);
1572 if (error)
1573 goto done;
1574 error = xfs_btree_increment(cur, 0, &i);
1575 if (error)
1576 goto done;
1577 NEW = RIGHT;
1578 NEW.rm_offset = offset;
1579 NEW.rm_startblock = bno;
1580 NEW.rm_blockcount += len;
1581 error = xfs_rmap_update(cur, &NEW);
1582 if (error)
1583 goto done;
1584 break;
1585
1586 case RMAP_RIGHT_FILLING:
1587 /*
1588 * Setting the last part of a previous oldext extent to newext.
1589 * The right neighbor is not contiguous.
1590 */
1591 NEW = PREV;
1592 NEW.rm_blockcount -= len;
1593 error = xfs_rmap_update(cur, &NEW);
1594 if (error)
1595 goto done;
1596 error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
1597 oldext, &i);
1598 if (error)
1599 goto done;
1600 if (XFS_IS_CORRUPT(mp, i != 0)) {
1601 xfs_btree_mark_sick(cur);
1602 error = -EFSCORRUPTED;
1603 goto done;
1604 }
1605 NEW.rm_startblock = bno;
1606 NEW.rm_owner = owner;
1607 NEW.rm_offset = offset;
1608 NEW.rm_blockcount = len;
1609 NEW.rm_flags = newext;
1610 cur->bc_rec.r = NEW;
1611 trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
1612 len, owner, offset, newext);
1613 error = xfs_btree_insert(cur, &i);
1614 if (error)
1615 goto done;
1616 if (XFS_IS_CORRUPT(mp, i != 1)) {
1617 xfs_btree_mark_sick(cur);
1618 error = -EFSCORRUPTED;
1619 goto done;
1620 }
1621 break;
1622
1623 case 0:
1624 /*
1625 * Setting the middle part of a previous oldext extent to
1626 * newext. Contiguity is impossible here.
1627 * One extent becomes three extents.
1628 */
1629 /* new right extent - oldext */
1630 NEW.rm_startblock = bno + len;
1631 NEW.rm_owner = owner;
1632 NEW.rm_offset = new_endoff;
1633 NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
1634 new_endoff;
1635 NEW.rm_flags = PREV.rm_flags;
1636 error = xfs_rmap_update(cur, &NEW);
1637 if (error)
1638 goto done;
1639 /* new left extent - oldext */
1640 NEW = PREV;
1641 NEW.rm_blockcount = offset - PREV.rm_offset;
1642 cur->bc_rec.r = NEW;
1643 trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
1644 NEW.rm_startblock, NEW.rm_blockcount,
1645 NEW.rm_owner, NEW.rm_offset,
1646 NEW.rm_flags);
1647 error = xfs_btree_insert(cur, &i);
1648 if (error)
1649 goto done;
1650 if (XFS_IS_CORRUPT(mp, i != 1)) {
1651 xfs_btree_mark_sick(cur);
1652 error = -EFSCORRUPTED;
1653 goto done;
1654 }
1655 /*
1656 * Reset the cursor to the position of the new extent
1657 * we are about to insert as we can't trust it after
1658 * the previous insert.
1659 */
1660 error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
1661 oldext, &i);
1662 if (error)
1663 goto done;
1664 if (XFS_IS_CORRUPT(mp, i != 0)) {
1665 xfs_btree_mark_sick(cur);
1666 error = -EFSCORRUPTED;
1667 goto done;
1668 }
1669 /* new middle extent - newext */
1670 cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
1671 cur->bc_rec.r.rm_flags |= newext;
1672 trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
1673 owner, offset, newext);
1674 error = xfs_btree_insert(cur, &i);
1675 if (error)
1676 goto done;
1677 if (XFS_IS_CORRUPT(mp, i != 1)) {
1678 xfs_btree_mark_sick(cur);
1679 error = -EFSCORRUPTED;
1680 goto done;
1681 }
1682 break;
1683
1684 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1685 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1686 case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
1687 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1688 case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1689 case RMAP_LEFT_CONTIG:
1690 case RMAP_RIGHT_CONTIG:
1691 /*
1692 * These cases are all impossible.
1693 */
1694 ASSERT(0);
1695 }
1696
1697 trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
1698 unwritten, oinfo);
1699done:
1700 if (error)
1701 trace_xfs_rmap_convert_error(cur->bc_mp,
1702 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
1703 return error;
1704}
1705
1706/*
1707 * Convert an unwritten extent to a real extent or vice versa. If there is no
1708 * possibility of overlapping extents, delegate to the simpler convert
1709 * function.
1710 */
1711STATIC int
1712xfs_rmap_convert_shared(
1713 struct xfs_btree_cur *cur,
1714 xfs_agblock_t bno,
1715 xfs_extlen_t len,
1716 bool unwritten,
1717 const struct xfs_owner_info *oinfo)
1718{
1719 struct xfs_mount *mp = cur->bc_mp;
1720 struct xfs_rmap_irec r[4]; /* neighbor extent entries */
1721 /* left is 0, right is 1, */
1722 /* prev is 2, new is 3 */
1723 uint64_t owner;
1724 uint64_t offset;
1725 uint64_t new_endoff;
1726 unsigned int oldext;
1727 unsigned int newext;
1728 unsigned int flags = 0;
1729 int i;
1730 int state = 0;
1731 int error;
1732
1733 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
1734 ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
1735 (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
1736 oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
1737 new_endoff = offset + len;
1738 trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
1739 unwritten, oinfo);
1740
1741 /*
1742 * For the initial lookup, look for and exact match or the left-adjacent
1743 * record for our insertion point. This will also give us the record for
1744 * start block contiguity tests.
1745 */
1746 error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
1747 &PREV, &i);
1748 if (error)
1749 goto done;
1750 if (XFS_IS_CORRUPT(mp, i != 1)) {
1751 xfs_btree_mark_sick(cur);
1752 error = -EFSCORRUPTED;
1753 goto done;
1754 }
1755
1756 ASSERT(PREV.rm_offset <= offset);
1757 ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
1758 ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
1759 newext = ~oldext & XFS_RMAP_UNWRITTEN;
1760
1761 /*
1762 * Set flags determining what part of the previous oldext allocation
1763 * extent is being replaced by a newext allocation.
1764 */
1765 if (PREV.rm_offset == offset)
1766 state |= RMAP_LEFT_FILLING;
1767 if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
1768 state |= RMAP_RIGHT_FILLING;
1769
1770 /* Is there a left record that abuts our range? */
1771 error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, newext,
1772 &LEFT, &i);
1773 if (error)
1774 goto done;
1775 if (i) {
1776 state |= RMAP_LEFT_VALID;
1777 if (XFS_IS_CORRUPT(mp,
1778 LEFT.rm_startblock + LEFT.rm_blockcount >
1779 bno)) {
1780 xfs_btree_mark_sick(cur);
1781 error = -EFSCORRUPTED;
1782 goto done;
1783 }
1784 if (xfs_rmap_is_mergeable(&LEFT, owner, newext))
1785 state |= RMAP_LEFT_CONTIG;
1786 }
1787
1788 /* Is there a right record that abuts our range? */
1789 error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
1790 newext, &i);
1791 if (error)
1792 goto done;
1793 if (i) {
1794 state |= RMAP_RIGHT_VALID;
1795 error = xfs_rmap_get_rec(cur, irec: &RIGHT, stat: &i);
1796 if (error)
1797 goto done;
1798 if (XFS_IS_CORRUPT(mp, i != 1)) {
1799 xfs_btree_mark_sick(cur);
1800 error = -EFSCORRUPTED;
1801 goto done;
1802 }
1803 if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
1804 xfs_btree_mark_sick(cur);
1805 error = -EFSCORRUPTED;
1806 goto done;
1807 }
1808 trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
1809 cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
1810 RIGHT.rm_blockcount, RIGHT.rm_owner,
1811 RIGHT.rm_offset, RIGHT.rm_flags);
1812 if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
1813 state |= RMAP_RIGHT_CONTIG;
1814 }
1815
1816 /* check that left + prev + right is not too long */
1817 if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1818 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
1819 (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1820 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
1821 (unsigned long)LEFT.rm_blockcount + len +
1822 RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
1823 state &= ~RMAP_RIGHT_CONTIG;
1824
1825 trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
1826 _RET_IP_);
1827 /*
1828 * Switch out based on the FILLING and CONTIG state bits.
1829 */
1830 switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1831 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
1832 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1833 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1834 /*
1835 * Setting all of a previous oldext extent to newext.
1836 * The left and right neighbors are both contiguous with new.
1837 */
1838 error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
1839 RIGHT.rm_blockcount, RIGHT.rm_owner,
1840 RIGHT.rm_offset, RIGHT.rm_flags);
1841 if (error)
1842 goto done;
1843 error = xfs_rmap_delete(cur, PREV.rm_startblock,
1844 PREV.rm_blockcount, PREV.rm_owner,
1845 PREV.rm_offset, PREV.rm_flags);
1846 if (error)
1847 goto done;
1848 NEW = LEFT;
1849 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1850 NEW.rm_blockcount, NEW.rm_owner,
1851 NEW.rm_offset, NEW.rm_flags, &i);
1852 if (error)
1853 goto done;
1854 if (XFS_IS_CORRUPT(mp, i != 1)) {
1855 xfs_btree_mark_sick(cur);
1856 error = -EFSCORRUPTED;
1857 goto done;
1858 }
1859 NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
1860 error = xfs_rmap_update(cur, &NEW);
1861 if (error)
1862 goto done;
1863 break;
1864
1865 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1866 /*
1867 * Setting all of a previous oldext extent to newext.
1868 * The left neighbor is contiguous, the right is not.
1869 */
1870 error = xfs_rmap_delete(cur, PREV.rm_startblock,
1871 PREV.rm_blockcount, PREV.rm_owner,
1872 PREV.rm_offset, PREV.rm_flags);
1873 if (error)
1874 goto done;
1875 NEW = LEFT;
1876 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1877 NEW.rm_blockcount, NEW.rm_owner,
1878 NEW.rm_offset, NEW.rm_flags, &i);
1879 if (error)
1880 goto done;
1881 if (XFS_IS_CORRUPT(mp, i != 1)) {
1882 xfs_btree_mark_sick(cur);
1883 error = -EFSCORRUPTED;
1884 goto done;
1885 }
1886 NEW.rm_blockcount += PREV.rm_blockcount;
1887 error = xfs_rmap_update(cur, &NEW);
1888 if (error)
1889 goto done;
1890 break;
1891
1892 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1893 /*
1894 * Setting all of a previous oldext extent to newext.
1895 * The right neighbor is contiguous, the left is not.
1896 */
1897 error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
1898 RIGHT.rm_blockcount, RIGHT.rm_owner,
1899 RIGHT.rm_offset, RIGHT.rm_flags);
1900 if (error)
1901 goto done;
1902 NEW = PREV;
1903 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1904 NEW.rm_blockcount, NEW.rm_owner,
1905 NEW.rm_offset, NEW.rm_flags, &i);
1906 if (error)
1907 goto done;
1908 if (XFS_IS_CORRUPT(mp, i != 1)) {
1909 xfs_btree_mark_sick(cur);
1910 error = -EFSCORRUPTED;
1911 goto done;
1912 }
1913 NEW.rm_blockcount += RIGHT.rm_blockcount;
1914 NEW.rm_flags = RIGHT.rm_flags;
1915 error = xfs_rmap_update(cur, &NEW);
1916 if (error)
1917 goto done;
1918 break;
1919
1920 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
1921 /*
1922 * Setting all of a previous oldext extent to newext.
1923 * Neither the left nor right neighbors are contiguous with
1924 * the new one.
1925 */
1926 NEW = PREV;
1927 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1928 NEW.rm_blockcount, NEW.rm_owner,
1929 NEW.rm_offset, NEW.rm_flags, &i);
1930 if (error)
1931 goto done;
1932 if (XFS_IS_CORRUPT(mp, i != 1)) {
1933 xfs_btree_mark_sick(cur);
1934 error = -EFSCORRUPTED;
1935 goto done;
1936 }
1937 NEW.rm_flags = newext;
1938 error = xfs_rmap_update(cur, &NEW);
1939 if (error)
1940 goto done;
1941 break;
1942
1943 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
1944 /*
1945 * Setting the first part of a previous oldext extent to newext.
1946 * The left neighbor is contiguous.
1947 */
1948 NEW = PREV;
1949 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1950 NEW.rm_blockcount, NEW.rm_owner,
1951 NEW.rm_offset, NEW.rm_flags);
1952 if (error)
1953 goto done;
1954 NEW.rm_offset += len;
1955 NEW.rm_startblock += len;
1956 NEW.rm_blockcount -= len;
1957 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1958 NEW.rm_blockcount, NEW.rm_owner,
1959 NEW.rm_offset, NEW.rm_flags);
1960 if (error)
1961 goto done;
1962 NEW = LEFT;
1963 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1964 NEW.rm_blockcount, NEW.rm_owner,
1965 NEW.rm_offset, NEW.rm_flags, &i);
1966 if (error)
1967 goto done;
1968 if (XFS_IS_CORRUPT(mp, i != 1)) {
1969 xfs_btree_mark_sick(cur);
1970 error = -EFSCORRUPTED;
1971 goto done;
1972 }
1973 NEW.rm_blockcount += len;
1974 error = xfs_rmap_update(cur, &NEW);
1975 if (error)
1976 goto done;
1977 break;
1978
1979 case RMAP_LEFT_FILLING:
1980 /*
1981 * Setting the first part of a previous oldext extent to newext.
1982 * The left neighbor is not contiguous.
1983 */
1984 NEW = PREV;
1985 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1986 NEW.rm_blockcount, NEW.rm_owner,
1987 NEW.rm_offset, NEW.rm_flags);
1988 if (error)
1989 goto done;
1990 NEW.rm_offset += len;
1991 NEW.rm_startblock += len;
1992 NEW.rm_blockcount -= len;
1993 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1994 NEW.rm_blockcount, NEW.rm_owner,
1995 NEW.rm_offset, NEW.rm_flags);
1996 if (error)
1997 goto done;
1998 error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1999 if (error)
2000 goto done;
2001 break;
2002
2003 case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
2004 /*
2005 * Setting the last part of a previous oldext extent to newext.
2006 * The right neighbor is contiguous with the new allocation.
2007 */
2008 NEW = PREV;
2009 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
2010 NEW.rm_blockcount, NEW.rm_owner,
2011 NEW.rm_offset, NEW.rm_flags, &i);
2012 if (error)
2013 goto done;
2014 if (XFS_IS_CORRUPT(mp, i != 1)) {
2015 xfs_btree_mark_sick(cur);
2016 error = -EFSCORRUPTED;
2017 goto done;
2018 }
2019 NEW.rm_blockcount = offset - NEW.rm_offset;
2020 error = xfs_rmap_update(cur, &NEW);
2021 if (error)
2022 goto done;
2023 NEW = RIGHT;
2024 error = xfs_rmap_delete(cur, NEW.rm_startblock,
2025 NEW.rm_blockcount, NEW.rm_owner,
2026 NEW.rm_offset, NEW.rm_flags);
2027 if (error)
2028 goto done;
2029 NEW.rm_offset = offset;
2030 NEW.rm_startblock = bno;
2031 NEW.rm_blockcount += len;
2032 error = xfs_rmap_insert(cur, NEW.rm_startblock,
2033 NEW.rm_blockcount, NEW.rm_owner,
2034 NEW.rm_offset, NEW.rm_flags);
2035 if (error)
2036 goto done;
2037 break;
2038
2039 case RMAP_RIGHT_FILLING:
2040 /*
2041 * Setting the last part of a previous oldext extent to newext.
2042 * The right neighbor is not contiguous.
2043 */
2044 NEW = PREV;
2045 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
2046 NEW.rm_blockcount, NEW.rm_owner,
2047 NEW.rm_offset, NEW.rm_flags, &i);
2048 if (error)
2049 goto done;
2050 if (XFS_IS_CORRUPT(mp, i != 1)) {
2051 xfs_btree_mark_sick(cur);
2052 error = -EFSCORRUPTED;
2053 goto done;
2054 }
2055 NEW.rm_blockcount -= len;
2056 error = xfs_rmap_update(cur, &NEW);
2057 if (error)
2058 goto done;
2059 error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
2060 if (error)
2061 goto done;
2062 break;
2063
2064 case 0:
2065 /*
2066 * Setting the middle part of a previous oldext extent to
2067 * newext. Contiguity is impossible here.
2068 * One extent becomes three extents.
2069 */
2070 /* new right extent - oldext */
2071 NEW.rm_startblock = bno + len;
2072 NEW.rm_owner = owner;
2073 NEW.rm_offset = new_endoff;
2074 NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
2075 new_endoff;
2076 NEW.rm_flags = PREV.rm_flags;
2077 error = xfs_rmap_insert(cur, NEW.rm_startblock,
2078 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
2079 NEW.rm_flags);
2080 if (error)
2081 goto done;
2082 /* new left extent - oldext */
2083 NEW = PREV;
2084 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
2085 NEW.rm_blockcount, NEW.rm_owner,
2086 NEW.rm_offset, NEW.rm_flags, &i);
2087 if (error)
2088 goto done;
2089 if (XFS_IS_CORRUPT(mp, i != 1)) {
2090 xfs_btree_mark_sick(cur);
2091 error = -EFSCORRUPTED;
2092 goto done;
2093 }
2094 NEW.rm_blockcount = offset - NEW.rm_offset;
2095 error = xfs_rmap_update(cur, &NEW);
2096 if (error)
2097 goto done;
2098 /* new middle extent - newext */
2099 NEW.rm_startblock = bno;
2100 NEW.rm_blockcount = len;
2101 NEW.rm_owner = owner;
2102 NEW.rm_offset = offset;
2103 NEW.rm_flags = newext;
2104 error = xfs_rmap_insert(cur, NEW.rm_startblock,
2105 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
2106 NEW.rm_flags);
2107 if (error)
2108 goto done;
2109 break;
2110
2111 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2112 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2113 case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
2114 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
2115 case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2116 case RMAP_LEFT_CONTIG:
2117 case RMAP_RIGHT_CONTIG:
2118 /*
2119 * These cases are all impossible.
2120 */
2121 ASSERT(0);
2122 }
2123
2124 trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
2125 unwritten, oinfo);
2126done:
2127 if (error)
2128 trace_xfs_rmap_convert_error(cur->bc_mp,
2129 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
2130 return error;
2131}
2132
2133#undef NEW
2134#undef LEFT
2135#undef RIGHT
2136#undef PREV
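
/*
 * Worked example (editor's note, not part of the original source) for the
 * fully contiguous convert case above (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
 * RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG): suppose LEFT covers agblocks
 * 100-109, PREV covers 110-119 (the extent being converted in full) and
 * RIGHT covers 120-129, all for the same owner with adjoining file offsets.
 * RIGHT and PREV are deleted, LEFT is looked up again, and LEFT's
 * rm_blockcount grows by PREV.rm_blockcount + RIGHT.rm_blockcount, leaving
 * a single record for agblocks 100-129.  The remaining switch cases are the
 * same idea with fewer neighbors merged or with PREV only partially
 * converted.
 */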
2137
2138/*
2139 * Find an extent in the rmap btree and unmap it. For rmap extent types that
2140 * can overlap (data fork rmaps on reflink filesystems) we must be careful
2141 * that the prev/next records in the btree might belong to another owner.
2142 * Therefore we must use delete+insert to alter any of the key fields.
2143 *
2144 * For every other situation there can only be one owner for a given extent,
2145 * so we can call the regular _free function.
2146 */
2147STATIC int
2148xfs_rmap_unmap_shared(
2149 struct xfs_btree_cur *cur,
2150 xfs_agblock_t bno,
2151 xfs_extlen_t len,
2152 bool unwritten,
2153 const struct xfs_owner_info *oinfo)
2154{
2155 struct xfs_mount *mp = cur->bc_mp;
2156 struct xfs_rmap_irec ltrec;
2157 uint64_t ltoff;
2158 int error = 0;
2159 int i;
2160 uint64_t owner;
2161 uint64_t offset;
2162 unsigned int flags;
2163
2164 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
2165 if (unwritten)
2166 flags |= XFS_RMAP_UNWRITTEN;
2167 trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
2168 unwritten, oinfo);
2169
2170 /*
2171 * We should always have a left record because there's a static record
2172 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
2173 * will not ever be removed from the tree.
2174 */
2175 error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
2176 &ltrec, &i);
2177 if (error)
2178 goto out_error;
2179 if (XFS_IS_CORRUPT(mp, i != 1)) {
2180 xfs_btree_mark_sick(cur);
2181 error = -EFSCORRUPTED;
2182 goto out_error;
2183 }
2184 ltoff = ltrec.rm_offset;
2185
2186 /* Make sure the extent we found covers the entire freeing range. */
2187 if (XFS_IS_CORRUPT(mp,
2188 ltrec.rm_startblock > bno ||
2189 ltrec.rm_startblock + ltrec.rm_blockcount <
2190 bno + len)) {
2191 xfs_btree_mark_sick(cur);
2192 error = -EFSCORRUPTED;
2193 goto out_error;
2194 }
2195
2196 /* Make sure the owner matches what we expect to find in the tree. */
2197 if (XFS_IS_CORRUPT(mp, owner != ltrec.rm_owner)) {
2198 xfs_btree_mark_sick(cur);
2199 error = -EFSCORRUPTED;
2200 goto out_error;
2201 }
2202
2203 /* Make sure the unwritten flag matches. */
2204 if (XFS_IS_CORRUPT(mp,
2205 (flags & XFS_RMAP_UNWRITTEN) !=
2206 (ltrec.rm_flags & XFS_RMAP_UNWRITTEN))) {
2207 xfs_btree_mark_sick(cur);
2208 error = -EFSCORRUPTED;
2209 goto out_error;
2210 }
2211
2212 /* Check the offset. */
2213 if (XFS_IS_CORRUPT(mp, ltrec.rm_offset > offset)) {
2214 xfs_btree_mark_sick(cur);
2215 error = -EFSCORRUPTED;
2216 goto out_error;
2217 }
2218 if (XFS_IS_CORRUPT(mp, offset > ltoff + ltrec.rm_blockcount)) {
2219 xfs_btree_mark_sick(cur);
2220 error = -EFSCORRUPTED;
2221 goto out_error;
2222 }
2223
2224 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
2225 /* Exact match, simply remove the record from rmap tree. */
2226 error = xfs_rmap_delete(cur, ltrec.rm_startblock,
2227 ltrec.rm_blockcount, ltrec.rm_owner,
2228 ltrec.rm_offset, ltrec.rm_flags);
2229 if (error)
2230 goto out_error;
2231 } else if (ltrec.rm_startblock == bno) {
2232 /*
2233 * Overlap left hand side of extent: move the start, trim the
2234 * length and update the current record.
2235 *
2236 * ltbno ltlen
2237 * Orig: |oooooooooooooooooooo|
2238 * Freeing: |fffffffff|
2239 * Result: |rrrrrrrrrr|
2240 * bno len
2241 */
2242
2243 /* Delete prev rmap. */
2244 error = xfs_rmap_delete(cur, ltrec.rm_startblock,
2245 ltrec.rm_blockcount, ltrec.rm_owner,
2246 ltrec.rm_offset, ltrec.rm_flags);
2247 if (error)
2248 goto out_error;
2249
2250 /* Add an rmap at the new offset. */
2251 ltrec.rm_startblock += len;
2252 ltrec.rm_blockcount -= len;
2253 ltrec.rm_offset += len;
2254 error = xfs_rmap_insert(cur, ltrec.rm_startblock,
2255 ltrec.rm_blockcount, ltrec.rm_owner,
2256 ltrec.rm_offset, ltrec.rm_flags);
2257 if (error)
2258 goto out_error;
2259 } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
2260 /*
2261 * Overlap right hand side of extent: trim the length and
2262 * update the current record.
2263 *
2264 * ltbno ltlen
2265 * Orig: |oooooooooooooooooooo|
2266 * Freeing: |fffffffff|
2267 * Result: |rrrrrrrrrr|
2268 * bno len
2269 */
2270 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2271 ltrec.rm_blockcount, ltrec.rm_owner,
2272 ltrec.rm_offset, ltrec.rm_flags, &i);
2273 if (error)
2274 goto out_error;
2275 if (XFS_IS_CORRUPT(mp, i != 1)) {
2276 xfs_btree_mark_sick(cur);
2277 error = -EFSCORRUPTED;
2278 goto out_error;
2279 }
2280 ltrec.rm_blockcount -= len;
2281 error = xfs_rmap_update(cur, &ltrec);
2282 if (error)
2283 goto out_error;
2284 } else {
2285 /*
2286 * Overlap middle of extent: trim the length of the existing
2287 * record to the length of the new left-extent size, increment
2288 * the insertion position so we can insert a new record
2289 * containing the remaining right-extent space.
2290 *
2291 * ltbno ltlen
2292 * Orig: |oooooooooooooooooooo|
2293 * Freeing: |fffffffff|
2294 * Result: |rrrrr| |rrrr|
2295 * bno len
2296 */
2297 xfs_extlen_t orig_len = ltrec.rm_blockcount;
2298
2299 /* Shrink the left side of the rmap */
2300 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2301 ltrec.rm_blockcount, ltrec.rm_owner,
2302 ltrec.rm_offset, ltrec.rm_flags, &i);
2303 if (error)
2304 goto out_error;
2305 if (XFS_IS_CORRUPT(mp, i != 1)) {
2306 xfs_btree_mark_sick(cur);
2307 error = -EFSCORRUPTED;
2308 goto out_error;
2309 }
2310 ltrec.rm_blockcount = bno - ltrec.rm_startblock;
2311 error = xfs_rmap_update(cur, &ltrec);
2312 if (error)
2313 goto out_error;
2314
2315 /* Add an rmap at the new offset */
2316 error = xfs_rmap_insert(cur, bno + len,
2317 orig_len - len - ltrec.rm_blockcount,
2318 ltrec.rm_owner, offset + len,
2319 ltrec.rm_flags);
2320 if (error)
2321 goto out_error;
2322 }
2323
2324 trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
2325 unwritten, oinfo);
2326out_error:
2327 if (error)
2328 trace_xfs_rmap_unmap_error(cur->bc_mp,
2329 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
2330 return error;
2331}
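
/*
 * Worked example (editor's note, not part of the original source) for the
 * "overlap middle of extent" branch above, with hypothetical numbers:
 * the existing record maps agblocks 100-119 (rm_blockcount 20) at file
 * offset 50, and we unmap bno 108, len 4 (file offset 58).  The left piece
 * is trimmed to rm_blockcount = 108 - 100 = 8 (agblocks 100-107, offsets
 * 50-57), and a new right piece is inserted at agblock 112 with length
 * 20 - 4 - 8 = 8 and offset 58 + 4 = 62 (agblocks 112-119, offsets 62-69).
 */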
2332
2333/*
2334 * Find an extent in the rmap btree and map it. For rmap extent types that
2335 * can overlap (data fork rmaps on reflink filesystems) we must be careful
2336 * that the prev/next records in the btree might belong to another owner.
2337 * Therefore we must use delete+insert to alter any of the key fields.
2338 *
2339 * For every other situation there can only be one owner for a given extent,
2340 * so we can call the regular _alloc function.
2341 */
2342STATIC int
2343xfs_rmap_map_shared(
2344 struct xfs_btree_cur *cur,
2345 xfs_agblock_t bno,
2346 xfs_extlen_t len,
2347 bool unwritten,
2348 const struct xfs_owner_info *oinfo)
2349{
2350 struct xfs_mount *mp = cur->bc_mp;
2351 struct xfs_rmap_irec ltrec;
2352 struct xfs_rmap_irec gtrec;
2353 int have_gt;
2354 int have_lt;
2355 int error = 0;
2356 int i;
2357 uint64_t owner;
2358 uint64_t offset;
2359 unsigned int flags = 0;
2360
2361 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
2362 if (unwritten)
2363 flags |= XFS_RMAP_UNWRITTEN;
2364 trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
2365 unwritten, oinfo);
2366
2367 /* Is there a left record that abuts our range? */
2368 error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, flags,
2369 &ltrec, &have_lt);
2370 if (error)
2371 goto out_error;
2372 if (have_lt &&
2373 !xfs_rmap_is_mergeable(&ltrec, owner, flags))
2374 have_lt = 0;
2375
2376 /* Is there a right record that abuts our range? */
2377 error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
2378 flags, &have_gt);
2379 if (error)
2380 goto out_error;
2381 if (have_gt) {
2382		error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);

2383 if (error)
2384 goto out_error;
2385 if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
2386 xfs_btree_mark_sick(cur);
2387 error = -EFSCORRUPTED;
2388 goto out_error;
2389 }
2390 trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
2391 cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
2392 gtrec.rm_blockcount, gtrec.rm_owner,
2393 gtrec.rm_offset, gtrec.rm_flags);
2394
2395 if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
2396 have_gt = 0;
2397 }
2398
2399 if (have_lt &&
2400 ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
2401 ltrec.rm_offset + ltrec.rm_blockcount == offset) {
2402 /*
2403 * Left edge contiguous, merge into left record.
2404 *
2405 * ltbno ltlen
2406 * orig: |ooooooooo|
2407 * adding: |aaaaaaaaa|
2408 * result: |rrrrrrrrrrrrrrrrrrr|
2409 * bno len
2410 */
2411 ltrec.rm_blockcount += len;
2412 if (have_gt &&
2413 bno + len == gtrec.rm_startblock &&
2414 offset + len == gtrec.rm_offset) {
2415 /*
2416 * Right edge also contiguous, delete right record
2417 * and merge into left record.
2418 *
2419 * ltbno ltlen gtbno gtlen
2420 * orig: |ooooooooo| |ooooooooo|
2421 * adding: |aaaaaaaaa|
2422 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
2423 */
2424 ltrec.rm_blockcount += gtrec.rm_blockcount;
2425 error = xfs_rmap_delete(cur, gtrec.rm_startblock,
2426 gtrec.rm_blockcount, gtrec.rm_owner,
2427 gtrec.rm_offset, gtrec.rm_flags);
2428 if (error)
2429 goto out_error;
2430 }
2431
2432 /* Point the cursor back to the left record and update. */
2433 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2434 ltrec.rm_blockcount, ltrec.rm_owner,
2435 ltrec.rm_offset, ltrec.rm_flags, &i);
2436 if (error)
2437 goto out_error;
2438 if (XFS_IS_CORRUPT(mp, i != 1)) {
2439 xfs_btree_mark_sick(cur);
2440 error = -EFSCORRUPTED;
2441 goto out_error;
2442 }
2443
2444 error = xfs_rmap_update(cur, &ltrec);
2445 if (error)
2446 goto out_error;
2447 } else if (have_gt &&
2448 bno + len == gtrec.rm_startblock &&
2449 offset + len == gtrec.rm_offset) {
2450 /*
2451 * Right edge contiguous, merge into right record.
2452 *
2453 * gtbno gtlen
2454 * Orig: |ooooooooo|
2455 * adding: |aaaaaaaaa|
2456 * Result: |rrrrrrrrrrrrrrrrrrr|
2457 * bno len
2458 */
2459 /* Delete the old record. */
2460 error = xfs_rmap_delete(cur, gtrec.rm_startblock,
2461 gtrec.rm_blockcount, gtrec.rm_owner,
2462 gtrec.rm_offset, gtrec.rm_flags);
2463 if (error)
2464 goto out_error;
2465
2466 /* Move the start and re-add it. */
2467 gtrec.rm_startblock = bno;
2468 gtrec.rm_blockcount += len;
2469 gtrec.rm_offset = offset;
2470 error = xfs_rmap_insert(cur, gtrec.rm_startblock,
2471 gtrec.rm_blockcount, gtrec.rm_owner,
2472 gtrec.rm_offset, gtrec.rm_flags);
2473 if (error)
2474 goto out_error;
2475 } else {
2476 /*
2477 * No contiguous edge with identical owner, insert
2478 * new record at current cursor position.
2479 */
2480 error = xfs_rmap_insert(cur, bno, len, owner, offset, flags);
2481 if (error)
2482 goto out_error;
2483 }
2484
2485 trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
2486 unwritten, oinfo);
2487out_error:
2488 if (error)
2489 trace_xfs_rmap_map_error(cur->bc_mp,
2490 cur->bc_ag.pag->pag_agno, error, _RET_IP_);
2491 return error;
2492}
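
/*
 * Worked example (editor's note, not part of the original source) for the
 * double merge above, with hypothetical numbers: the left record maps
 * agblocks 100-109 at file offset 50, we add agblocks 110-114 at offset 60,
 * and the right record maps agblocks 115-122 at offset 65.  Both edges are
 * contiguous, so the right record is deleted and the left record is updated
 * to rm_blockcount = 10 + 5 + 8 = 23, i.e. one record for agblocks 100-122
 * at file offset 50.
 */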
2493
2494/* Insert a raw rmap into the rmapbt. */
2495int
2496xfs_rmap_map_raw(
2497 struct xfs_btree_cur *cur,
2498 struct xfs_rmap_irec *rmap)
2499{
2500 struct xfs_owner_info oinfo;
2501
2502 xfs_owner_info_pack(&oinfo, rmap->rm_owner, rmap->rm_offset,
2503 rmap->rm_flags);
2504
2505 if ((rmap->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK |
2506 XFS_RMAP_UNWRITTEN)) ||
2507 XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
2508 return xfs_rmap_map(cur, rmap->rm_startblock,
2509 rmap->rm_blockcount,
2510 rmap->rm_flags & XFS_RMAP_UNWRITTEN,
2511 &oinfo);
2512
2513 return xfs_rmap_map_shared(cur, rmap->rm_startblock,
2514 rmap->rm_blockcount,
2515 rmap->rm_flags & XFS_RMAP_UNWRITTEN,
2516 &oinfo);
2517}
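
/*
 * Editor's sketch (not part of the original source): a minimal example of
 * feeding a raw record to xfs_rmap_map_raw(), e.g. from a repair-style
 * caller that already holds an rmapbt cursor.  The record values and the
 * helper name are hypothetical.
 */
#if 0
STATIC int
xfs_rmap_example_map_raw(
	struct xfs_btree_cur	*cur)
{
	struct xfs_rmap_irec	rmap = {
		.rm_startblock	= 100,			/* AG block of the extent */
		.rm_blockcount	= 8,			/* length in blocks */
		.rm_owner	= XFS_RMAP_OWN_AG,	/* special non-inode owner */
		.rm_offset	= 0,			/* unused for AG metadata */
		.rm_flags	= 0,
	};

	/* Non-inode owners take the non-shared mapping path above. */
	return xfs_rmap_map_raw(cur, &rmap);
}
#endif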
2518
2519struct xfs_rmap_query_range_info {
2520 xfs_rmap_query_range_fn fn;
2521 void *priv;
2522};
2523
2524/* Format btree record and pass to our callback. */
2525STATIC int
2526xfs_rmap_query_range_helper(
2527 struct xfs_btree_cur *cur,
2528 const union xfs_btree_rec *rec,
2529 void *priv)
2530{
2531 struct xfs_rmap_query_range_info *query = priv;
2532 struct xfs_rmap_irec irec;
2533 xfs_failaddr_t fa;
2534
2535 fa = xfs_rmap_btrec_to_irec(rec, &irec);
2536 if (!fa)
2537 fa = xfs_rmap_check_btrec(cur, &irec);
2538 if (fa)
2539 return xfs_rmap_complain_bad_rec(cur, fa, &irec);
2540
2541 return query->fn(cur, &irec, query->priv);
2542}
2543
2544/* Find all rmaps between two keys. */
2545int
2546xfs_rmap_query_range(
2547 struct xfs_btree_cur *cur,
2548 const struct xfs_rmap_irec *low_rec,
2549 const struct xfs_rmap_irec *high_rec,
2550 xfs_rmap_query_range_fn fn,
2551 void *priv)
2552{
2553 union xfs_btree_irec low_brec = { .r = *low_rec };
2554 union xfs_btree_irec high_brec = { .r = *high_rec };
2555 struct xfs_rmap_query_range_info query = { .priv = priv, .fn = fn };
2556
2557	return xfs_btree_query_range(cur, &low_brec, &high_brec,
2558			xfs_rmap_query_range_helper, &query);
2559}
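
/*
 * Editor's sketch (not part of the original source): a minimal
 * xfs_rmap_query_range() caller that counts the rmaps overlapping an AG
 * block range.  The helper names are hypothetical; the low/high key
 * construction mirrors xfs_rmap_has_records() below.
 */
#if 0
struct xfs_rmap_example_count {
	uint64_t		nr;
};

STATIC int
xfs_rmap_example_count_helper(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xfs_rmap_example_count	*rc = priv;

	rc->nr++;
	return 0;
}

STATIC int
xfs_rmap_example_count_range(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	uint64_t		*nr)
{
	struct xfs_rmap_example_count	rc = { .nr = 0 };
	struct xfs_rmap_irec	low;
	struct xfs_rmap_irec	high;
	int			error;

	/* Keys sort on startblock first; span [bno, bno + len - 1]. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	error = xfs_rmap_query_range(cur, &low, &high,
			xfs_rmap_example_count_helper, &rc);
	if (error)
		return error;

	*nr = rc.nr;
	return 0;
}
#endif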
2560
2561/* Find all rmaps. */
2562int
2563xfs_rmap_query_all(
2564 struct xfs_btree_cur *cur,
2565 xfs_rmap_query_range_fn fn,
2566 void *priv)
2567{
2568 struct xfs_rmap_query_range_info query;
2569
2570 query.priv = priv;
2571 query.fn = fn;
2572	return xfs_btree_query_all(cur, xfs_rmap_query_range_helper, &query);
2573}
2574
2575/* Clean up after calling xfs_rmap_finish_one. */
2576void
2577xfs_rmap_finish_one_cleanup(
2578 struct xfs_trans *tp,
2579 struct xfs_btree_cur *rcur,
2580 int error)
2581{
2582 struct xfs_buf *agbp;
2583
2584 if (rcur == NULL)
2585 return;
2586 agbp = rcur->bc_ag.agbp;
2587	xfs_btree_del_cursor(rcur, error);
2588 if (error)
2589 xfs_trans_brelse(tp, agbp);
2590}
2591
2592/* Commit an rmap operation into the ondisk tree. */
2593int
2594__xfs_rmap_finish_intent(
2595 struct xfs_btree_cur *rcur,
2596 enum xfs_rmap_intent_type op,
2597 xfs_agblock_t bno,
2598 xfs_extlen_t len,
2599 const struct xfs_owner_info *oinfo,
2600 bool unwritten)
2601{
2602 switch (op) {
2603 case XFS_RMAP_ALLOC:
2604 case XFS_RMAP_MAP:
2605 return xfs_rmap_map(rcur, bno, len, unwritten, oinfo);
2606 case XFS_RMAP_MAP_SHARED:
2607 return xfs_rmap_map_shared(rcur, bno, len, unwritten, oinfo);
2608 case XFS_RMAP_FREE:
2609 case XFS_RMAP_UNMAP:
2610 return xfs_rmap_unmap(rcur, bno, len, unwritten, oinfo);
2611 case XFS_RMAP_UNMAP_SHARED:
2612 return xfs_rmap_unmap_shared(rcur, bno, len, unwritten, oinfo);
2613 case XFS_RMAP_CONVERT:
2614 return xfs_rmap_convert(rcur, bno, len, !unwritten, oinfo);
2615 case XFS_RMAP_CONVERT_SHARED:
2616 return xfs_rmap_convert_shared(rcur, bno, len, !unwritten,
2617 oinfo);
2618 default:
2619 ASSERT(0);
2620 return -EFSCORRUPTED;
2621 }
2622}
2623
2624/*
2625 * Process one of the deferred rmap operations. We pass back the
2626 * btree cursor to maintain our lock on the rmapbt between calls.
2627 * This saves time and eliminates a buffer deadlock between the
2628 * superblock and the AGF because we'll always grab them in the same
2629 * order.
2630 */
2631int
2632xfs_rmap_finish_one(
2633 struct xfs_trans *tp,
2634 struct xfs_rmap_intent *ri,
2635 struct xfs_btree_cur **pcur)
2636{
2637 struct xfs_mount *mp = tp->t_mountp;
2638 struct xfs_btree_cur *rcur;
2639 struct xfs_buf *agbp = NULL;
2640 int error = 0;
2641 struct xfs_owner_info oinfo;
2642 xfs_agblock_t bno;
2643 bool unwritten;
2644
2645 bno = XFS_FSB_TO_AGBNO(mp, ri->ri_bmap.br_startblock);
2646
2647 trace_xfs_rmap_deferred(mp, ri->ri_pag->pag_agno, ri->ri_type, bno,
2648 ri->ri_owner, ri->ri_whichfork,
2649 ri->ri_bmap.br_startoff, ri->ri_bmap.br_blockcount,
2650 ri->ri_bmap.br_state);
2651
2652 if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE))
2653 return -EIO;
2654
2655 /*
2656 * If we haven't gotten a cursor or the cursor AG doesn't match
2657 * the startblock, get one now.
2658 */
2659 rcur = *pcur;
2660 if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
2661		xfs_rmap_finish_one_cleanup(tp, rcur, 0);
2662 rcur = NULL;
2663 *pcur = NULL;
2664 }
2665 if (rcur == NULL) {
2666 /*
2667 * Refresh the freelist before we start changing the
2668 * rmapbt, because a shape change could cause us to
2669 * allocate blocks.
2670 */
2671		error = xfs_free_extent_fix_freelist(tp, ri->ri_pag, &agbp);
2672 if (error) {
2673			xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL);
2674 return error;
2675 }
2676 if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
2677			xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL);
2678 return -EFSCORRUPTED;
2679 }
2680
2681		rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag);
2682 }
2683 *pcur = rcur;
2684
2685 xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
2686 ri->ri_bmap.br_startoff);
2687 unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
2688 bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
2689
2690 error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
2691 ri->ri_bmap.br_blockcount, &oinfo, unwritten);
2692 if (error)
2693 return error;
2694
2695 xfs_rmap_update_hook(tp, ri->ri_pag, ri->ri_type, bno,
2696 ri->ri_bmap.br_blockcount, unwritten, &oinfo);
2697 return 0;
2698}
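
/*
 * Editor's sketch (not part of the original source): the caller pattern
 * that xfs_rmap_finish_one() expects.  The deferred-ops machinery is the
 * real driver; the list head and loop below are hypothetical and only
 * illustrate how *pcur carries the rmapbt cursor (and AGF lock) across
 * consecutive intents for the same AG.
 */
#if 0
STATIC int
xfs_rmap_example_finish_all(
	struct xfs_trans	*tp,
	struct list_head	*intents)
{
	struct xfs_rmap_intent	*ri;
	struct xfs_btree_cur	*rcur = NULL;
	int			error = 0;

	list_for_each_entry(ri, intents, ri_list) {
		error = xfs_rmap_finish_one(tp, ri, &rcur);
		if (error)
			break;
	}

	/* Drops the cursor and, on error, releases the AGF buffer. */
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return error;
}
#endif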
2699
2700/*
2701 * Don't defer an rmap if we aren't an rmap filesystem.
2702 */
2703static bool
2704xfs_rmap_update_is_needed(
2705 struct xfs_mount *mp,
2706 int whichfork)
2707{
2708 return xfs_has_rmapbt(mp) && whichfork != XFS_COW_FORK;
2709}
2710
2711/*
2712 * Record an rmap intent; the list is kept sorted first by AG and then by
2713 * increasing age.
2714 */
2715static void
2716__xfs_rmap_add(
2717 struct xfs_trans *tp,
2718 enum xfs_rmap_intent_type type,
2719 uint64_t owner,
2720 int whichfork,
2721 struct xfs_bmbt_irec *bmap)
2722{
2723 struct xfs_rmap_intent *ri;
2724
2725 trace_xfs_rmap_defer(tp->t_mountp,
2726 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
2727 type,
2728 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
2729 owner, whichfork,
2730 bmap->br_startoff,
2731 bmap->br_blockcount,
2732 bmap->br_state);
2733
2734 ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
2735 INIT_LIST_HEAD(&ri->ri_list);
2736 ri->ri_type = type;
2737 ri->ri_owner = owner;
2738 ri->ri_whichfork = whichfork;
2739 ri->ri_bmap = *bmap;
2740
2741	xfs_rmap_update_get_group(tp->t_mountp, ri);
2742	xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
2743}
2744
2745/* Map an extent into a file. */
2746void
2747xfs_rmap_map_extent(
2748 struct xfs_trans *tp,
2749 struct xfs_inode *ip,
2750 int whichfork,
2751 struct xfs_bmbt_irec *PREV)
2752{
2753 enum xfs_rmap_intent_type type = XFS_RMAP_MAP;
2754
2755 if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
2756 return;
2757
2758 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2759 type = XFS_RMAP_MAP_SHARED;
2760
2761 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2762}
2763
2764/* Unmap an extent out of a file. */
2765void
2766xfs_rmap_unmap_extent(
2767 struct xfs_trans *tp,
2768 struct xfs_inode *ip,
2769 int whichfork,
2770 struct xfs_bmbt_irec *PREV)
2771{
2772 enum xfs_rmap_intent_type type = XFS_RMAP_UNMAP;
2773
2774 if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
2775 return;
2776
2777 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2778 type = XFS_RMAP_UNMAP_SHARED;
2779
2780 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2781}
2782
2783/*
2784 * Convert a data fork extent from unwritten to real or vice versa.
2785 *
2786 * Note that tp can be NULL here as no transaction is used for COW fork
2787 * unwritten conversion.
2788 */
2789void
2790xfs_rmap_convert_extent(
2791 struct xfs_mount *mp,
2792 struct xfs_trans *tp,
2793 struct xfs_inode *ip,
2794 int whichfork,
2795 struct xfs_bmbt_irec *PREV)
2796{
2797 enum xfs_rmap_intent_type type = XFS_RMAP_CONVERT;
2798
2799 if (!xfs_rmap_update_is_needed(mp, whichfork))
2800 return;
2801
2802 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2803 type = XFS_RMAP_CONVERT_SHARED;
2804
2805 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2806}
2807
2808/* Schedule the creation of an rmap for non-file data. */
2809void
2810xfs_rmap_alloc_extent(
2811 struct xfs_trans *tp,
2812 xfs_agnumber_t agno,
2813 xfs_agblock_t bno,
2814 xfs_extlen_t len,
2815 uint64_t owner)
2816{
2817 struct xfs_bmbt_irec bmap;
2818
2819 if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
2820 return;
2821
2822 bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
2823 bmap.br_blockcount = len;
2824 bmap.br_startoff = 0;
2825 bmap.br_state = XFS_EXT_NORM;
2826
2827 __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
2828}
2829
2830/* Schedule the deletion of an rmap for non-file data. */
2831void
2832xfs_rmap_free_extent(
2833 struct xfs_trans *tp,
2834 xfs_agnumber_t agno,
2835 xfs_agblock_t bno,
2836 xfs_extlen_t len,
2837 uint64_t owner)
2838{
2839 struct xfs_bmbt_irec bmap;
2840
2841 if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
2842 return;
2843
2844 bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
2845 bmap.br_blockcount = len;
2846 bmap.br_startoff = 0;
2847 bmap.br_state = XFS_EXT_NORM;
2848
2849 __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
2850}
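
/*
 * Editor's sketch (not part of the original source): scheduling rmap
 * updates for non-file space, e.g. blocks taken for or released from AG
 * metadata.  The agno/bno/len values are hypothetical; XFS_RMAP_OWN_AG is
 * one of the special non-inode owners.
 */
#if 0
static void
xfs_rmap_example_ag_blocks(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno)
{
	/* Take agblocks 100-107 of this AG for AG metadata... */
	xfs_rmap_alloc_extent(tp, agno, 100, 8, XFS_RMAP_OWN_AG);

	/* ...and give agblocks 200-203 back. */
	xfs_rmap_free_extent(tp, agno, 200, 4, XFS_RMAP_OWN_AG);
}
#endif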
2851
2852/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
2853int
2854xfs_rmap_compare(
2855 const struct xfs_rmap_irec *a,
2856 const struct xfs_rmap_irec *b)
2857{
2858 __u64 oa;
2859 __u64 ob;
2860
2861 oa = xfs_rmap_irec_offset_pack(a);
2862 ob = xfs_rmap_irec_offset_pack(b);
2863
2864 if (a->rm_startblock < b->rm_startblock)
2865 return -1;
2866 else if (a->rm_startblock > b->rm_startblock)
2867 return 1;
2868 else if (a->rm_owner < b->rm_owner)
2869 return -1;
2870 else if (a->rm_owner > b->rm_owner)
2871 return 1;
2872 else if (oa < ob)
2873 return -1;
2874 else if (oa > ob)
2875 return 1;
2876 else
2877 return 0;
2878}
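
/*
 * Editor's sketch (not part of the original source): xfs_rmap_compare()
 * orders records the same way the rmapbt does (startblock, then owner,
 * then encoded offset), so it can back a generic comparator, e.g. for
 * sorting an in-memory array of records with sort() from <linux/sort.h>.
 * The wrapper below is hypothetical.
 */
#if 0
static int
xfs_rmap_example_cmp(
	const void	*a,
	const void	*b)
{
	return xfs_rmap_compare(a, b);
}

/* sort(recs, nr_recs, sizeof(struct xfs_rmap_irec), xfs_rmap_example_cmp, NULL); */
#endif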
2879
2880/*
2881 * Scan the physical storage part of the keyspace of the reverse mapping index
2882 * and tell us if the area has no records, is fully mapped by records, or is
2883 * partially filled.
2884 */
2885int
2886xfs_rmap_has_records(
2887 struct xfs_btree_cur *cur,
2888 xfs_agblock_t bno,
2889 xfs_extlen_t len,
2890 enum xbtree_recpacking *outcome)
2891{
2892 union xfs_btree_key mask = {
2893 .rmap.rm_startblock = cpu_to_be32(-1U),
2894 };
2895 union xfs_btree_irec low;
2896 union xfs_btree_irec high;
2897
2898 memset(&low, 0, sizeof(low));
2899 low.r.rm_startblock = bno;
2900 memset(&high, 0xFF, sizeof(high));
2901 high.r.rm_startblock = bno + len - 1;
2902
2903	return xfs_btree_has_records(cur, &low, &high, &mask, outcome);
2904}
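
/*
 * Editor's sketch (not part of the original source): interpreting the
 * xfs_rmap_has_records() outcome, e.g. in a scrub-style check that a
 * range of AG blocks is completely covered by rmaps.  The enumerator
 * names of enum xbtree_recpacking are assumed by this sketch; the helper
 * name is hypothetical.
 */
#if 0
STATIC int
xfs_rmap_example_is_fully_mapped(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*fully_mapped)
{
	enum xbtree_recpacking	outcome;
	int			error;

	error = xfs_rmap_has_records(cur, bno, len, &outcome);
	if (error)
		return error;

	/* EMPTY: no records; SPARSE: partial coverage; FULL: all covered. */
	*fully_mapped = (outcome == XBTREE_RECPACKING_FULL);
	return 0;
}
#endif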
2905
2906struct xfs_rmap_ownercount {
2907 /* Owner that we're looking for. */
2908 struct xfs_rmap_irec good;
2909
2910 /* rmap search keys */
2911 struct xfs_rmap_irec low;
2912 struct xfs_rmap_irec high;
2913
2914 struct xfs_rmap_matches *results;
2915
2916 /* Stop early if we find a nonmatch? */
2917 bool stop_on_nonmatch;
2918};
2919
2920/* Does this rmap represent space that can have multiple owners? */
2921static inline bool
2922xfs_rmap_shareable(
2923 struct xfs_mount *mp,
2924 const struct xfs_rmap_irec *rmap)
2925{
2926 if (!xfs_has_reflink(mp))
2927 return false;
2928 if (XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
2929 return false;
2930 if (rmap->rm_flags & (XFS_RMAP_ATTR_FORK |
2931 XFS_RMAP_BMBT_BLOCK))
2932 return false;
2933 return true;
2934}
2935
2936static inline void
2937xfs_rmap_ownercount_init(
2938 struct xfs_rmap_ownercount *roc,
2939 xfs_agblock_t bno,
2940 xfs_extlen_t len,
2941 const struct xfs_owner_info *oinfo,
2942 struct xfs_rmap_matches *results)
2943{
2944 memset(roc, 0, sizeof(*roc));
2945 roc->results = results;
2946
2947 roc->low.rm_startblock = bno;
2948 memset(&roc->high, 0xFF, sizeof(roc->high));
2949 roc->high.rm_startblock = bno + len - 1;
2950
2951 memset(results, 0, sizeof(*results));
2952 roc->good.rm_startblock = bno;
2953 roc->good.rm_blockcount = len;
2954 roc->good.rm_owner = oinfo->oi_owner;
2955 roc->good.rm_offset = oinfo->oi_offset;
2956 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2957 roc->good.rm_flags |= XFS_RMAP_ATTR_FORK;
2958 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2959 roc->good.rm_flags |= XFS_RMAP_BMBT_BLOCK;
2960}
2961
2962/* Figure out if this is a match for the owner. */
2963STATIC int
2964xfs_rmap_count_owners_helper(
2965 struct xfs_btree_cur *cur,
2966 const struct xfs_rmap_irec *rec,
2967 void *priv)
2968{
2969 struct xfs_rmap_ownercount *roc = priv;
2970 struct xfs_rmap_irec check = *rec;
2971 unsigned int keyflags;
2972 bool filedata;
2973 int64_t delta;
2974
2975 filedata = !XFS_RMAP_NON_INODE_OWNER(check.rm_owner) &&
2976 !(check.rm_flags & XFS_RMAP_BMBT_BLOCK);
2977
2978 /* Trim the part of check that comes before the comparison range. */
2979 delta = (int64_t)roc->good.rm_startblock - check.rm_startblock;
2980 if (delta > 0) {
2981 check.rm_startblock += delta;
2982 check.rm_blockcount -= delta;
2983 if (filedata)
2984 check.rm_offset += delta;
2985 }
2986
2987 /* Trim the part of check that comes after the comparison range. */
2988 delta = (check.rm_startblock + check.rm_blockcount) -
2989 (roc->good.rm_startblock + roc->good.rm_blockcount);
2990 if (delta > 0)
2991 check.rm_blockcount -= delta;
2992
2993 /* Don't care about unwritten status for establishing ownership. */
2994 keyflags = check.rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK);
2995
2996 if (check.rm_startblock == roc->good.rm_startblock &&
2997 check.rm_blockcount == roc->good.rm_blockcount &&
2998 check.rm_owner == roc->good.rm_owner &&
2999 check.rm_offset == roc->good.rm_offset &&
3000 keyflags == roc->good.rm_flags) {
3001 roc->results->matches++;
3002 } else {
3003 roc->results->non_owner_matches++;
3004 if (xfs_rmap_shareable(cur->bc_mp, &roc->good) ^
3005 xfs_rmap_shareable(cur->bc_mp, &check))
3006 roc->results->bad_non_owner_matches++;
3007 }
3008
3009 if (roc->results->non_owner_matches && roc->stop_on_nonmatch)
3010 return -ECANCELED;
3011
3012 return 0;
3013}
3014
3015/* Count the number of owners and non-owners of this range of blocks. */
3016int
3017xfs_rmap_count_owners(
3018 struct xfs_btree_cur *cur,
3019 xfs_agblock_t bno,
3020 xfs_extlen_t len,
3021 const struct xfs_owner_info *oinfo,
3022 struct xfs_rmap_matches *results)
3023{
3024 struct xfs_rmap_ownercount roc;
3025 int error;
3026
3027 xfs_rmap_ownercount_init(&roc, bno, len, oinfo, results);
3028	error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
3029			xfs_rmap_count_owners_helper, &roc);
3030 if (error)
3031 return error;
3032
3033 /*
3034 * There can't be any non-owner rmaps that conflict with the given
3035 * owner if we didn't find any rmaps matching the owner.
3036 */
3037 if (!results->matches)
3038 results->bad_non_owner_matches = 0;
3039
3040 return 0;
3041}
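
/*
 * Editor's sketch (not part of the original source): using
 * xfs_rmap_count_owners() to check that an extent is mapped exactly once
 * to the expected owner and is not claimed by anyone else.  The helper
 * name is hypothetical.
 */
#if 0
STATIC int
xfs_rmap_example_check_sole_owner(
	struct xfs_btree_cur		*cur,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				*sole_owner)
{
	struct xfs_rmap_matches		res;
	int				error;

	error = xfs_rmap_count_owners(cur, bno, len, oinfo, &res);
	if (error)
		return error;

	/* Exactly one matching record and no foreign claims on the range. */
	*sole_owner = res.matches == 1 && res.non_owner_matches == 0;
	return 0;
}
#endif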
3042
3043/*
3044 * Given an extent and some owner info, can we find records overlapping
3045 * the extent whose owner info does not match the given owner?
3046 */
3047int
3048xfs_rmap_has_other_keys(
3049 struct xfs_btree_cur *cur,
3050 xfs_agblock_t bno,
3051 xfs_extlen_t len,
3052 const struct xfs_owner_info *oinfo,
3053 bool *has_other)
3054{
3055 struct xfs_rmap_matches res;
3056 struct xfs_rmap_ownercount roc;
3057 int error;
3058
3059 xfs_rmap_ownercount_init(&roc, bno, len, oinfo, &res);
3060 roc.stop_on_nonmatch = true;
3061
3062	error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
3063			xfs_rmap_count_owners_helper, &roc);
3064 if (error == -ECANCELED) {
3065 *has_other = true;
3066 return 0;
3067 }
3068 if (error)
3069 return error;
3070
3071 *has_other = false;
3072 return 0;
3073}
3074
3075const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE = {
3076 .oi_owner = XFS_RMAP_OWN_NULL,
3077};
3078const struct xfs_owner_info XFS_RMAP_OINFO_ANY_OWNER = {
3079 .oi_owner = XFS_RMAP_OWN_UNKNOWN,
3080};
3081const struct xfs_owner_info XFS_RMAP_OINFO_FS = {
3082 .oi_owner = XFS_RMAP_OWN_FS,
3083};
3084const struct xfs_owner_info XFS_RMAP_OINFO_LOG = {
3085 .oi_owner = XFS_RMAP_OWN_LOG,
3086};
3087const struct xfs_owner_info XFS_RMAP_OINFO_AG = {
3088 .oi_owner = XFS_RMAP_OWN_AG,
3089};
3090const struct xfs_owner_info XFS_RMAP_OINFO_INOBT = {
3091 .oi_owner = XFS_RMAP_OWN_INOBT,
3092};
3093const struct xfs_owner_info XFS_RMAP_OINFO_INODES = {
3094 .oi_owner = XFS_RMAP_OWN_INODES,
3095};
3096const struct xfs_owner_info XFS_RMAP_OINFO_REFC = {
3097 .oi_owner = XFS_RMAP_OWN_REFC,
3098};
3099const struct xfs_owner_info XFS_RMAP_OINFO_COW = {
3100 .oi_owner = XFS_RMAP_OWN_COW,
3101};
3102
3103int __init
3104xfs_rmap_intent_init_cache(void)
3105{
3106 xfs_rmap_intent_cache = kmem_cache_create("xfs_rmap_intent",
3107 sizeof(struct xfs_rmap_intent),
3108 0, 0, NULL);
3109
3110 return xfs_rmap_intent_cache != NULL ? 0 : -ENOMEM;
3111}
3112
3113void
3114xfs_rmap_intent_destroy_cache(void)
3115{
3116 kmem_cache_destroy(xfs_rmap_intent_cache);
3117 xfs_rmap_intent_cache = NULL;
3118}
3119
