1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_sb.h"
14#include "xfs_mount.h"
15#include "xfs_inode.h"
16#include "xfs_iwalk.h"
17#include "xfs_quota.h"
18#include "xfs_bmap.h"
19#include "xfs_bmap_util.h"
20#include "xfs_trans.h"
21#include "xfs_trans_space.h"
22#include "xfs_qm.h"
23#include "xfs_trace.h"
24#include "xfs_icache.h"
25#include "xfs_error.h"
26#include "xfs_ag.h"
27#include "xfs_ialloc.h"
28#include "xfs_log_priv.h"
29#include "xfs_health.h"
30
31/*
32 * The global quota manager. There is only one of these for the entire
33 * system, _not_ one per file system. XQM keeps track of the overall
34 * quota functionality, including maintaining the freelist and hash
35 * tables of dquots.
36 */
37STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
38STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
39
40STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
41STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
42/*
43 * We use the batch lookup interface to iterate over the dquots as it
44 * currently is the only interface into the radix tree code that allows
45 * fuzzy lookups instead of exact matches. Holding the lock over multiple
46 * operations is fine as all callers run either during mount/umount
47 * or quotaoff.
48 */
49#define XFS_DQ_LOOKUP_BATCH 32
50
51STATIC int
52xfs_qm_dquot_walk(
53 struct xfs_mount *mp,
54 xfs_dqtype_t type,
55 int (*execute)(struct xfs_dquot *dqp, void *data),
56 void *data)
57{
58 struct xfs_quotainfo *qi = mp->m_quotainfo;
59 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
60 uint32_t next_index;
61 int last_error = 0;
62 int skipped;
63 int nr_found;
64
65restart:
66 skipped = 0;
67 next_index = 0;
68 nr_found = 0;
69
70 while (1) {
71 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
72 int error;
73 int i;
74
75 mutex_lock(&qi->qi_tree_lock);
76 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
77 next_index, XFS_DQ_LOOKUP_BATCH);
78 if (!nr_found) {
79 mutex_unlock(&qi->qi_tree_lock);
80 break;
81 }
82
83 for (i = 0; i < nr_found; i++) {
84 struct xfs_dquot *dqp = batch[i];
85
86 next_index = dqp->q_id + 1;
87
88 error = execute(batch[i], data);
89 if (error == -EAGAIN) {
90 skipped++;
91 continue;
92 }
93 if (error && last_error != -EFSCORRUPTED)
94 last_error = error;
95 }
96
97 mutex_unlock(&qi->qi_tree_lock);
98
99 /* bail out if the filesystem is corrupted. */
100 if (last_error == -EFSCORRUPTED) {
101 skipped = 0;
102 break;
103 }
104 /* we're done if id overflows back to zero */
105 if (!next_index)
106 break;
107 }
108
109 if (skipped) {
110 delay(1);
111 goto restart;
112 }
113
114 return last_error;
115}
116
117
118/*
119 * Purge a dquot from all tracking data structures and free it.
120 */
121STATIC int
122xfs_qm_dqpurge(
123 struct xfs_dquot *dqp,
124 void *data)
125{
126 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
127 int error = -EAGAIN;
128
129 xfs_dqlock(dqp);
130 if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
131 goto out_unlock;
132
133 dqp->q_flags |= XFS_DQFLAG_FREEING;
134
135 xfs_dqflock(dqp);
136
137 /*
138 * If we are turning this type of quotas off, we don't care
139 * about the dirty metadata sitting in this dquot. OTOH, if
140 * we're unmounting, we do care, so we flush it and wait.
141 */
142 if (XFS_DQ_IS_DIRTY(dqp)) {
143 struct xfs_buf *bp = NULL;
144
145 /*
146 * We don't care about getting disk errors here. We need
147 * to purge this dquot anyway, so we go ahead regardless.
148 */
149 error = xfs_qm_dqflush(dqp, &bp);
150 if (!error) {
151 error = xfs_bwrite(bp);
152 xfs_buf_relse(bp);
153 } else if (error == -EAGAIN) {
154 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
155 goto out_unlock;
156 }
157 xfs_dqflock(dqp);
158 }
159
160 ASSERT(atomic_read(&dqp->q_pincount) == 0);
161 ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
162 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
163
164 xfs_dqfunlock(dqp);
165 xfs_dqunlock(dqp);
166
167 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
168 qi->qi_dquots--;
169
170 /*
171 * We move dquots to the freelist as soon as their reference count
172 * hits zero, so it really should be on the freelist here.
173 */
174 ASSERT(!list_empty(&dqp->q_lru));
175 list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
176 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
177
178 xfs_qm_dqdestroy(dqp);
179 return 0;
180
181out_unlock:
182 xfs_dqunlock(dqp);
183 return error;
184}
185
186/*
187 * Purge the dquot cache.
188 */
189static void
190xfs_qm_dqpurge_all(
191 struct xfs_mount *mp)
192{
193 xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
194 xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
195 xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
196}
197
198/*
199 * Just destroy the quotainfo structure.
200 */
201void
202xfs_qm_unmount(
203 struct xfs_mount *mp)
204{
205 if (mp->m_quotainfo) {
206 xfs_qm_dqpurge_all(mp);
207 xfs_qm_destroy_quotainfo(mp);
208 }
209}
210
211/*
212 * Called from the vfsops layer.
213 */
214void
215xfs_qm_unmount_quotas(
216 xfs_mount_t *mp)
217{
218 /*
219 * Release the dquots that the root inode, et al might be holding,
220 * before we flush quotas and blow away the quotainfo structure.
221 */
222 ASSERT(mp->m_rootip);
223 xfs_qm_dqdetach(mp->m_rootip);
224 if (mp->m_rbmip)
225 xfs_qm_dqdetach(mp->m_rbmip);
226 if (mp->m_rsumip)
227 xfs_qm_dqdetach(mp->m_rsumip);
228
229 /*
230 * Release the quota inodes.
231 */
232 if (mp->m_quotainfo) {
233 if (mp->m_quotainfo->qi_uquotaip) {
234 xfs_irele(mp->m_quotainfo->qi_uquotaip);
235 mp->m_quotainfo->qi_uquotaip = NULL;
236 }
237 if (mp->m_quotainfo->qi_gquotaip) {
238 xfs_irele(mp->m_quotainfo->qi_gquotaip);
239 mp->m_quotainfo->qi_gquotaip = NULL;
240 }
241 if (mp->m_quotainfo->qi_pquotaip) {
242 xfs_irele(mp->m_quotainfo->qi_pquotaip);
243 mp->m_quotainfo->qi_pquotaip = NULL;
244 }
245 }
246}
247
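/*
 * Attach a single dquot of the given type to the inode.  If *IO_idqpp is
 * already set we are done; otherwise look the dquot up (allocating it on
 * disk if @doalloc is set) and store it, referenced but unlocked, in the
 * inode pointer that IO_idqpp refers to.
 */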
248STATIC int
249xfs_qm_dqattach_one(
250 struct xfs_inode *ip,
251 xfs_dqtype_t type,
252 bool doalloc,
253 struct xfs_dquot **IO_idqpp)
254{
255 struct xfs_dquot *dqp;
256 int error;
257
258 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
259 error = 0;
260
261 /*
262 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
263 * or &i_gdquot. This made the code look weird, but made the logic a lot
264 * simpler.
265 */
266 dqp = *IO_idqpp;
267 if (dqp) {
268 trace_xfs_dqattach_found(dqp);
269 return 0;
270 }
271
272 /*
273 * Find the dquot from somewhere. This bumps the reference count of
274 * dquot and returns it locked. This can return ENOENT if dquot didn't
275 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
276 * turned off suddenly.
277 */
278 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
279 if (error)
280 return error;
281
282 trace_xfs_dqattach_get(dqp);
283
284 /*
285 * dqget may have dropped and re-acquired the ilock, but it guarantees
286 * that the dquot returned is the one that should go in the inode.
287 */
288 *IO_idqpp = dqp;
289 xfs_dqunlock(dqp);
290 return 0;
291}
292
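/*
 * Dquots need to be attached only if quotas are enabled, the inode is not
 * itself a quota inode, and at least one enabled quota type has no dquot
 * attached to the inode yet.
 */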
293static bool
294xfs_qm_need_dqattach(
295 struct xfs_inode *ip)
296{
297 struct xfs_mount *mp = ip->i_mount;
298
299 if (!XFS_IS_QUOTA_ON(mp))
300 return false;
301 if (!XFS_NOT_DQATTACHED(mp, ip))
302 return false;
303 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
304 return false;
305 return true;
306}
307
308/*
309 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
310 * into account.
311 * If @doalloc is true, the dquot(s) will be allocated if needed.
312 * Inode may get unlocked and relocked in here, and the caller must deal with
313 * the consequences.
314 */
315int
316xfs_qm_dqattach_locked(
317 xfs_inode_t *ip,
318 bool doalloc)
319{
320 xfs_mount_t *mp = ip->i_mount;
321 int error = 0;
322
323 if (!xfs_qm_need_dqattach(ip))
324 return 0;
325
326 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
327
328 if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
329 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
330 doalloc, &ip->i_udquot);
331 if (error)
332 goto done;
333 ASSERT(ip->i_udquot);
334 }
335
336 if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
337 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
338 doalloc, &ip->i_gdquot);
339 if (error)
340 goto done;
341 ASSERT(ip->i_gdquot);
342 }
343
344 if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
345 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
346 doalloc, &ip->i_pdquot);
347 if (error)
348 goto done;
349 ASSERT(ip->i_pdquot);
350 }
351
352done:
353 /*
354 * Don't worry about the dquots that we may have attached before any
355 * error - they'll get detached later if it has not already been done.
356 */
357 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
358 return error;
359}
360
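/*
 * Attach any missing dquots to the inode, taking ILOCK_EXCL around the
 * locked variant above.  No on-disk dquot allocation is performed here.
 */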
361int
362xfs_qm_dqattach(
363 struct xfs_inode *ip)
364{
365 int error;
366
367 if (!xfs_qm_need_dqattach(ip))
368 return 0;
369
370 xfs_ilock(ip, XFS_ILOCK_EXCL);
371 error = xfs_qm_dqattach_locked(ip, false);
372 xfs_iunlock(ip, XFS_ILOCK_EXCL);
373
374 return error;
375}
376
377/*
378 * Release dquots (and their references) if any.
379 * The inode should be locked EXCL except when this is called by
380 * xfs_ireclaim.
381 */
382void
383xfs_qm_dqdetach(
384 xfs_inode_t *ip)
385{
386 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
387 return;
388
389 trace_xfs_dquot_dqdetach(ip);
390
391 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
392 if (ip->i_udquot) {
393 xfs_qm_dqrele(ip->i_udquot);
394 ip->i_udquot = NULL;
395 }
396 if (ip->i_gdquot) {
397 xfs_qm_dqrele(ip->i_gdquot);
398 ip->i_gdquot = NULL;
399 }
400 if (ip->i_pdquot) {
401 xfs_qm_dqrele(ip->i_pdquot);
402 ip->i_pdquot = NULL;
403 }
404}
405
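/*
 * State shared with the dquot LRU isolate callback: a delwri list of
 * buffers backing dirty dquots that need writing, and a dispose list of
 * clean dquots ready to be freed.
 */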
406struct xfs_qm_isolate {
407 struct list_head buffers;
408 struct list_head dispose;
409};
410
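/*
 * LRU walk callback run under the lru_lock on behalf of the shrinker.
 * Dquots that have regained a reference are simply dropped from the LRU;
 * dirty dquots are flushed and left for a later pass; clean, unreferenced
 * dquots are marked XFS_DQFLAG_FREEING and moved to the dispose list.
 */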
411static enum lru_status
412xfs_qm_dquot_isolate(
413 struct list_head *item,
414 struct list_lru_one *lru,
415 spinlock_t *lru_lock,
416 void *arg)
417 __releases(lru_lock) __acquires(lru_lock)
418{
419 struct xfs_dquot *dqp = container_of(item,
420 struct xfs_dquot, q_lru);
421 struct xfs_qm_isolate *isol = arg;
422
423 if (!xfs_dqlock_nowait(dqp))
424 goto out_miss_busy;
425
426 /*
427 * If something else is freeing this dquot and hasn't yet removed it
428 * from the LRU, leave it for the freeing task to complete the freeing
429 * process rather than risk it being freed from under us here.
430 */
431 if (dqp->q_flags & XFS_DQFLAG_FREEING)
432 goto out_miss_unlock;
433
434 /*
435 * This dquot has acquired a reference in the meantime; remove it from
436 * the freelist and try again.
437 */
438 if (dqp->q_nrefs) {
439 xfs_dqunlock(dqp);
440 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
441
442 trace_xfs_dqreclaim_want(dqp);
443 list_lru_isolate(lru, &dqp->q_lru);
444 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
445 return LRU_REMOVED;
446 }
447
448 /*
449 * If the dquot is dirty, flush it. If it's already being flushed, just
450 * skip it so there is time for the IO to complete before we try to
451 * reclaim it again on the next LRU pass.
452 */
453 if (!xfs_dqflock_nowait(dqp))
454 goto out_miss_unlock;
455
456 if (XFS_DQ_IS_DIRTY(dqp)) {
457 struct xfs_buf *bp = NULL;
458 int error;
459
460 trace_xfs_dqreclaim_dirty(dqp);
461
462 /* we have to drop the LRU lock to flush the dquot */
463 spin_unlock(lru_lock);
464
465 error = xfs_qm_dqflush(dqp, &bp);
466 if (error)
467 goto out_unlock_dirty;
468
469 xfs_buf_delwri_queue(bp, &isol->buffers);
470 xfs_buf_relse(bp);
471 goto out_unlock_dirty;
472 }
473 xfs_dqfunlock(dqp);
474
475 /*
476 * Prevent lookups now that we are past the point of no return.
477 */
478 dqp->q_flags |= XFS_DQFLAG_FREEING;
479 xfs_dqunlock(dqp);
480
481 ASSERT(dqp->q_nrefs == 0);
482 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
483 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
484 trace_xfs_dqreclaim_done(dqp);
485 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
486 return LRU_REMOVED;
487
488out_miss_unlock:
489 xfs_dqunlock(dqp);
490out_miss_busy:
491 trace_xfs_dqreclaim_busy(dqp);
492 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
493 return LRU_SKIP;
494
495out_unlock_dirty:
496 trace_xfs_dqreclaim_busy(dqp);
497 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
498 xfs_dqunlock(dqp);
499 spin_lock(lru_lock);
500 return LRU_RETRY;
501}
502
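/*
 * Shrinker scan callback: walk the dquot LRU, write back any dirty dquot
 * buffers queued by the isolate callback, then free everything that ended
 * up on the dispose list.
 */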
503static unsigned long
504xfs_qm_shrink_scan(
505 struct shrinker *shrink,
506 struct shrink_control *sc)
507{
508 struct xfs_quotainfo *qi = shrink->private_data;
509 struct xfs_qm_isolate isol;
510 unsigned long freed;
511 int error;
512
513 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
514 return 0;
515
516 INIT_LIST_HEAD(&isol.buffers);
517 INIT_LIST_HEAD(&isol.dispose);
518
519 freed = list_lru_shrink_walk(&qi->qi_lru, sc,
520 xfs_qm_dquot_isolate, &isol);
521
522 error = xfs_buf_delwri_submit(&isol.buffers);
523 if (error)
524 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
525
526 while (!list_empty(&isol.dispose)) {
527 struct xfs_dquot *dqp;
528
529 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
530 list_del_init(&dqp->q_lru);
531 xfs_qm_dqfree_one(dqp);
532 }
533
534 return freed;
535}
536
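/*
 * Shrinker count callback: report the number of unused dquots sitting on
 * the LRU.
 */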
537static unsigned long
538xfs_qm_shrink_count(
539 struct shrinker *shrink,
540 struct shrink_control *sc)
541{
542 struct xfs_quotainfo *qi = shrink->private_data;
543
544 return list_lru_shrink_count(&qi->qi_lru, sc);
545}
546
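/*
 * Read the id-zero dquot of the given type (uncached) and copy its limits
 * into the in-core default limits for that quota type.
 */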
547STATIC void
548xfs_qm_set_defquota(
549 struct xfs_mount *mp,
550 xfs_dqtype_t type,
551 struct xfs_quotainfo *qinf)
552{
553 struct xfs_dquot *dqp;
554 struct xfs_def_quota *defq;
555 int error;
556
557 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
558 if (error)
559 return;
560
561 defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
562
563 /*
564 * Timers and warnings have been already set, let's just set the
565 * default limits for this quota type
566 */
567 defq->blk.hard = dqp->q_blk.hardlimit;
568 defq->blk.soft = dqp->q_blk.softlimit;
569 defq->ino.hard = dqp->q_ino.hardlimit;
570 defq->ino.soft = dqp->q_ino.softlimit;
571 defq->rtb.hard = dqp->q_rtb.hardlimit;
572 defq->rtb.soft = dqp->q_rtb.softlimit;
573 xfs_qm_dqdestroy(dqp);
574}
575
576/* Initialize quota time limits from the root dquot. */
577static void
578xfs_qm_init_timelimits(
579 struct xfs_mount *mp,
580 xfs_dqtype_t type)
581{
582 struct xfs_quotainfo *qinf = mp->m_quotainfo;
583 struct xfs_def_quota *defq;
584 struct xfs_dquot *dqp;
585 int error;
586
587 defq = xfs_get_defquota(qinf, type);
588
589 defq->blk.time = XFS_QM_BTIMELIMIT;
590 defq->ino.time = XFS_QM_ITIMELIMIT;
591 defq->rtb.time = XFS_QM_RTBTIMELIMIT;
592
593 /*
594 * We try to get the limits from the superuser's limits fields.
595 * This is quite hacky, but it is standard quota practice.
596 *
597 * Since we may not have done a quotacheck by this point, just read
598 * the dquot without attaching it to any hashtables or lists.
599 */
600 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
601 if (error)
602 return;
603
604 /*
605 * The warnings and timers set the grace period given to
606 * a user or group before they can no longer perform any
607 * more writes. If it is zero, a default is used.
608 */
609 if (dqp->q_blk.timer)
610 defq->blk.time = dqp->q_blk.timer;
611 if (dqp->q_ino.timer)
612 defq->ino.time = dqp->q_ino.timer;
613 if (dqp->q_rtb.timer)
614 defq->rtb.time = dqp->q_rtb.timer;
615
616 xfs_qm_dqdestroy(dqp);
617}
618
619/*
620 * This initializes all the quota information that's kept in the
621 * mount structure
622 */
623STATIC int
624xfs_qm_init_quotainfo(
625 struct xfs_mount *mp)
626{
627 struct xfs_quotainfo *qinf;
628 int error;
629
630 ASSERT(XFS_IS_QUOTA_ON(mp));
631
632 qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
633 GFP_KERNEL | __GFP_NOFAIL);
634
635 error = list_lru_init(&qinf->qi_lru);
636 if (error)
637 goto out_free_qinf;
638
639 /*
640 * See if quotainodes are setup, and if not, allocate them,
641 * and change the superblock accordingly.
642 */
643 error = xfs_qm_init_quotainos(mp);
644 if (error)
645 goto out_free_lru;
646
647 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
648 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
649 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
650 mutex_init(&qinf->qi_tree_lock);
651
652 /* mutex used to serialize quotaoffs */
653 mutex_init(&qinf->qi_quotaofflock);
654
655 /* Precalc some constants */
656 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
657 qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
658 if (xfs_has_bigtime(mp)) {
659 qinf->qi_expiry_min =
660 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
661 qinf->qi_expiry_max =
662 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
663 } else {
664 qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
665 qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
666 }
667 trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
668 qinf->qi_expiry_max);
669
670 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
671
672 xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
673 xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
674 xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
675
676 if (XFS_IS_UQUOTA_ON(mp))
677 xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
678 if (XFS_IS_GQUOTA_ON(mp))
679 xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
680 if (XFS_IS_PQUOTA_ON(mp))
681 xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
682
683 qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
684 mp->m_super->s_id);
685 if (!qinf->qi_shrinker) {
686 error = -ENOMEM;
687 goto out_free_inos;
688 }
689
690 qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
691 qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
692 qinf->qi_shrinker->private_data = qinf;
693
694 shrinker_register(qinf->qi_shrinker);
695
696 xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
697 xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
698
699 return 0;
700
701out_free_inos:
702 mutex_destroy(&qinf->qi_quotaofflock);
703 mutex_destroy(&qinf->qi_tree_lock);
704 xfs_qm_destroy_quotainos(qinf);
705out_free_lru:
706 list_lru_destroy(&qinf->qi_lru);
707out_free_qinf:
708 kfree(qinf);
709 mp->m_quotainfo = NULL;
710 return error;
711}
712
713/*
714 * Gets called when unmounting a filesystem or when all quotas get
715 * turned off.
716 * This purges the quota inodes, destroys locks and frees itself.
717 */
718void
719xfs_qm_destroy_quotainfo(
720 struct xfs_mount *mp)
721{
722 struct xfs_quotainfo *qi;
723
724 qi = mp->m_quotainfo;
725 ASSERT(qi != NULL);
726
727 shrinker_free(qi->qi_shrinker);
728 list_lru_destroy(&qi->qi_lru);
729 xfs_qm_destroy_quotainos(qi);
730 mutex_destroy(&qi->qi_tree_lock);
731 mutex_destroy(&qi->qi_quotaofflock);
732 kfree(qi);
733 mp->m_quotainfo = NULL;
734}
735
736/*
737 * Create an inode and return with a reference already taken, but unlocked.
738 * This is how we create quota inodes.
739 */
740STATIC int
741xfs_qm_qino_alloc(
742 struct xfs_mount *mp,
743 struct xfs_inode **ipp,
744 unsigned int flags)
745{
746 struct xfs_trans *tp;
747 int error;
748 bool need_alloc = true;
749
750 *ipp = NULL;
751 /*
752 * With superblock that doesn't have separate pquotino, we
753 * share an inode between gquota and pquota. If the on-disk
754 * superblock has GQUOTA and the filesystem is now mounted
755 * with PQUOTA, just use sb_gquotino for sb_pquotino and
756 * vice-versa.
757 */
758 if (!xfs_has_pquotino(mp) &&
759 (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
760 xfs_ino_t ino = NULLFSINO;
761
762 if ((flags & XFS_QMOPT_PQUOTA) &&
763 (mp->m_sb.sb_gquotino != NULLFSINO)) {
764 ino = mp->m_sb.sb_gquotino;
765 if (XFS_IS_CORRUPT(mp,
766 mp->m_sb.sb_pquotino != NULLFSINO)) {
767 xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
768 return -EFSCORRUPTED;
769 }
770 } else if ((flags & XFS_QMOPT_GQUOTA) &&
771 (mp->m_sb.sb_pquotino != NULLFSINO)) {
772 ino = mp->m_sb.sb_pquotino;
773 if (XFS_IS_CORRUPT(mp,
774 mp->m_sb.sb_gquotino != NULLFSINO)) {
775 xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
776 return -EFSCORRUPTED;
777 }
778 }
779 if (ino != NULLFSINO) {
780 error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
781 if (error)
782 return error;
783 mp->m_sb.sb_gquotino = NULLFSINO;
784 mp->m_sb.sb_pquotino = NULLFSINO;
785 need_alloc = false;
786 }
787 }
788
789 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
790 need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
791 0, 0, &tp);
792 if (error)
793 return error;
794
795 if (need_alloc) {
796 xfs_ino_t ino;
797
798 error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
799 if (!error)
800 error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
801 S_IFREG, 1, 0, 0, false, ipp);
802 if (error) {
803 xfs_trans_cancel(tp);
804 return error;
805 }
806 }
807
808 /*
809 * Make the changes in the superblock, and log those too.
810 * sbfields arg may contain fields other than *QUOTINO;
811 * VERSIONNUM for example.
812 */
813 spin_lock(&mp->m_sb_lock);
814 if (flags & XFS_QMOPT_SBVERSION) {
815 ASSERT(!xfs_has_quota(mp));
816
817 xfs_add_quota(mp);
818 mp->m_sb.sb_uquotino = NULLFSINO;
819 mp->m_sb.sb_gquotino = NULLFSINO;
820 mp->m_sb.sb_pquotino = NULLFSINO;
821
822 /* qflags will get updated fully _after_ quotacheck */
823 mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
824 }
825 if (flags & XFS_QMOPT_UQUOTA)
826 mp->m_sb.sb_uquotino = (*ipp)->i_ino;
827 else if (flags & XFS_QMOPT_GQUOTA)
828 mp->m_sb.sb_gquotino = (*ipp)->i_ino;
829 else
830 mp->m_sb.sb_pquotino = (*ipp)->i_ino;
831 spin_unlock(&mp->m_sb_lock);
832 xfs_log_sb(tp);
833
834 error = xfs_trans_commit(tp);
835 if (error) {
836 ASSERT(xfs_is_shutdown(mp));
837 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
838 }
839 if (need_alloc)
840 xfs_finish_inode_setup(*ipp);
841 return error;
842}
843
844
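/*
 * Reset every dquot in a dquot buffer: repair any dqblk that fails
 * verification, force the expected type, and zero the usage counters.
 * Timers and warnings are cleared for all dquots except id 0, which holds
 * the administrator's default grace periods.
 */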
845STATIC void
846xfs_qm_reset_dqcounts(
847 struct xfs_mount *mp,
848 struct xfs_buf *bp,
849 xfs_dqid_t id,
850 xfs_dqtype_t type)
851{
852 struct xfs_dqblk *dqb;
853 int j;
854
855 trace_xfs_reset_dqcounts(bp, _RET_IP_);
856
857 /*
858 * Reset all counters and timers. They'll be
859 * started afresh by xfs_qm_quotacheck.
860 */
861#ifdef DEBUG
862 j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
863 sizeof(struct xfs_dqblk);
864 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
865#endif
866 dqb = bp->b_addr;
867 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
868 struct xfs_disk_dquot *ddq;
869
870 ddq = (struct xfs_disk_dquot *)&dqb[j];
871
872 /*
873 * Do a sanity check, and if needed, repair the dqblk. Don't
874 * output any warnings because it's perfectly possible to
875 * find uninitialised dquot blks. See comment in
876 * xfs_dquot_verify.
877 */
878 if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
879 (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
880 xfs_dqblk_repair(mp, &dqb[j], id + j, type);
881
882 /*
883 * Reset type in case we are reusing group quota file for
884 * project quotas or vice versa
885 */
886 ddq->d_type = type;
887 ddq->d_bcount = 0;
888 ddq->d_icount = 0;
889 ddq->d_rtbcount = 0;
890
891 /*
892 * dquot id 0 stores the default grace period and the maximum
893 * warning limit that were set by the administrator, so we
894 * should not reset them.
895 */
896 if (ddq->d_id != 0) {
897 ddq->d_btimer = 0;
898 ddq->d_itimer = 0;
899 ddq->d_rtbtimer = 0;
900 ddq->d_bwarns = 0;
901 ddq->d_iwarns = 0;
902 ddq->d_rtbwarns = 0;
903 if (xfs_has_bigtime(mp))
904 ddq->d_type |= XFS_DQTYPE_BIGTIME;
905 }
906
907 if (xfs_has_crc(mp)) {
908 xfs_update_cksum((char *)&dqb[j],
909 sizeof(struct xfs_dqblk),
910 XFS_DQUOT_CRC_OFF);
911 }
912 }
913}
914
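/*
 * Walk a contiguous range of dquot blocks, reading each buffer (retrying
 * without verification if it is corrupt so that it can be repaired),
 * resetting the dquot counters in it, and queueing it on the caller's
 * delwri list.
 */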
915STATIC int
916xfs_qm_reset_dqcounts_all(
917 struct xfs_mount *mp,
918 xfs_dqid_t firstid,
919 xfs_fsblock_t bno,
920 xfs_filblks_t blkcnt,
921 xfs_dqtype_t type,
922 struct list_head *buffer_list)
923{
924 struct xfs_buf *bp;
925 int error = 0;
926
927 ASSERT(blkcnt > 0);
928
929 /*
930 * Blkcnt arg can be a very big number, and might even be
931 * larger than the log itself. So, we have to break it up into
932 * manageable-sized transactions.
933 * Note that we don't start a permanent transaction here; we might
934 * not be able to get a log reservation for the whole thing up front,
935 * and we don't really care to either, because we just discard
936 * everything if we were to crash in the middle of this loop.
937 */
938 while (blkcnt--) {
939 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
940 XFS_FSB_TO_DADDR(mp, bno),
941 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
942 &xfs_dquot_buf_ops);
943
944 /*
945 * CRC and validation errors will return an EFSCORRUPTED here. If
946 * this occurs, re-read without CRC validation so that we can
947 * repair the damage via xfs_qm_reset_dqcounts(). This process
948 * will leave a trace in the log indicating corruption has
949 * been detected.
950 */
951 if (error == -EFSCORRUPTED) {
952 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
953 XFS_FSB_TO_DADDR(mp, bno),
954 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
955 NULL);
956 }
957
958 if (error)
959 break;
960
961 /*
962 * A corrupt buffer might not have a verifier attached, so
963 * make sure we have the correct one attached before writeback
964 * occurs.
965 */
966 bp->b_ops = &xfs_dquot_buf_ops;
967 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
968 xfs_buf_delwri_queue(bp, buffer_list);
969 xfs_buf_relse(bp);
970
971 /* goto the next block. */
972 bno++;
973 firstid += mp->m_quotainfo->qi_dqperchunk;
974 }
975
976 return error;
977}
978
979/*
980 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
981 * counters for every chunk of dquots that we find.
982 */
983STATIC int
984xfs_qm_reset_dqcounts_buf(
985 struct xfs_mount *mp,
986 struct xfs_inode *qip,
987 xfs_dqtype_t type,
988 struct list_head *buffer_list)
989{
990 struct xfs_bmbt_irec *map;
991 int i, nmaps; /* number of map entries */
992 int error; /* return value */
993 xfs_fileoff_t lblkno;
994 xfs_filblks_t maxlblkcnt;
995 xfs_dqid_t firstid;
996 xfs_fsblock_t rablkno;
997 xfs_filblks_t rablkcnt;
998
999 error = 0;
1000 /*
1001 * This looks racy, but we can't keep an inode lock across a
1002 * trans_reserve. But, this gets called during quotacheck, and that
1003 * happens only at mount time which is single threaded.
1004 */
1005 if (qip->i_nblocks == 0)
1006 return 0;
1007
1008 map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
1009 GFP_KERNEL | __GFP_NOFAIL);
1010
1011 lblkno = 0;
1012 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1013 do {
1014 uint lock_mode;
1015
1016 nmaps = XFS_DQITER_MAP_SIZE;
1017 /*
1018 * We aren't changing the inode itself. Just changing
1019 * some of its data. No new blocks are added here, and
1020 * the inode is never added to the transaction.
1021 */
1022 lock_mode = xfs_ilock_data_map_shared(qip);
1023 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1024 map, &nmaps, 0);
1025 xfs_iunlock(qip, lock_mode);
1026 if (error)
1027 break;
1028
1029 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1030 for (i = 0; i < nmaps; i++) {
1031 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1032 ASSERT(map[i].br_blockcount);
1033
1034
1035 lblkno += map[i].br_blockcount;
1036
1037 if (map[i].br_startblock == HOLESTARTBLOCK)
1038 continue;
1039
1040 firstid = (xfs_dqid_t) map[i].br_startoff *
1041 mp->m_quotainfo->qi_dqperchunk;
1042 /*
1043 * Do a read-ahead on the next extent.
1044 */
1045 if ((i+1 < nmaps) &&
1046 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1047 rablkcnt = map[i+1].br_blockcount;
1048 rablkno = map[i+1].br_startblock;
1049 while (rablkcnt--) {
1050 xfs_buf_readahead(mp->m_ddev_targp,
1051 XFS_FSB_TO_DADDR(mp, rablkno),
1052 mp->m_quotainfo->qi_dqchunklen,
1053 &xfs_dquot_buf_ops);
1054 rablkno++;
1055 }
1056 }
1057 /*
1058 * Iterate thru all the blks in the extent and
1059 * reset the counters of all the dquots inside them.
1060 */
1061 error = xfs_qm_reset_dqcounts_all(mp, firstid,
1062 map[i].br_startblock,
1063 map[i].br_blockcount,
1064 type, buffer_list);
1065 if (error)
1066 goto out;
1067 }
1068 } while (nmaps > 0);
1069
1070out:
1071 kfree(map);
1072 return error;
1073}
1074
1075/*
1076 * Called by dqusage_adjust in doing a quotacheck.
1077 *
1078 * Given the inode, and a dquot id this updates both the incore dquot as well
1079 * as the buffer copy. This is so that once the quotacheck is done, we can
1080 * just log all the buffers, as opposed to logging numerous updates to
1081 * individual dquots.
1082 */
1083STATIC int
1084xfs_qm_quotacheck_dqadjust(
1085 struct xfs_inode *ip,
1086 xfs_dqtype_t type,
1087 xfs_qcnt_t nblks,
1088 xfs_qcnt_t rtblks)
1089{
1090 struct xfs_mount *mp = ip->i_mount;
1091 struct xfs_dquot *dqp;
1092 xfs_dqid_t id;
1093 int error;
1094
1095 id = xfs_qm_id_for_quotatype(ip, type);
1096 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1097 if (error) {
1098 /*
1099 * Shouldn't be able to turn off quotas here.
1100 */
1101 ASSERT(error != -ESRCH);
1102 ASSERT(error != -ENOENT);
1103 return error;
1104 }
1105
1106 trace_xfs_dqadjust(dqp);
1107
1108 /*
1109 * Adjust the inode count and the block count to reflect this inode's
1110 * resource usage.
1111 */
1112 dqp->q_ino.count++;
1113 dqp->q_ino.reserved++;
1114 if (nblks) {
1115 dqp->q_blk.count += nblks;
1116 dqp->q_blk.reserved += nblks;
1117 }
1118 if (rtblks) {
1119 dqp->q_rtb.count += rtblks;
1120 dqp->q_rtb.reserved += rtblks;
1121 }
1122
1123 /*
1124 * Set default limits, adjust timers (since we changed usages)
1125 *
1126 * There are no timers for the default values set in the root dquot.
1127 */
1128 if (dqp->q_id) {
1129 xfs_qm_adjust_dqlimits(dqp);
1130 xfs_qm_adjust_dqtimers(dqp);
1131 }
1132
1133 dqp->q_flags |= XFS_DQFLAG_DIRTY;
1134 xfs_qm_dqput(dqp);
1135 return 0;
1136}
1137
1138/*
1139 * callback routine supplied to bulkstat(). Given an inumber, find its
1140 * dquots and update them to account for resources taken by that inode.
1141 */
1142/* ARGSUSED */
1143STATIC int
1144xfs_qm_dqusage_adjust(
1145 struct xfs_mount *mp,
1146 struct xfs_trans *tp,
1147 xfs_ino_t ino,
1148 void *data)
1149{
1150 struct xfs_inode *ip;
1151 xfs_qcnt_t nblks;
1152 xfs_filblks_t rtblks = 0; /* total rt blks */
1153 int error;
1154
1155 ASSERT(XFS_IS_QUOTA_ON(mp));
1156
1157 /*
1158 * rootino must have its resources accounted for, not so with the quota
1159 * inodes.
1160 */
1161 if (xfs_is_quota_inode(&mp->m_sb, ino))
1162 return 0;
1163
1164 /*
1165 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1166 * at mount time and therefore nobody will be racing chown/chproj.
1167 */
1168 error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1169 if (error == -EINVAL || error == -ENOENT)
1170 return 0;
1171 if (error)
1172 return error;
1173
1174 /*
1175 * Reload the incore unlinked list to avoid failure in inodegc.
1176 * Use an unlocked check here because unrecovered unlinked inodes
1177 * should be somewhat rare.
1178 */
1179 if (xfs_inode_unlinked_incomplete(ip)) {
1180 error = xfs_inode_reload_unlinked(ip);
1181 if (error) {
1182 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1183 goto error0;
1184 }
1185 }
1186
1187 ASSERT(ip->i_delayed_blks == 0);
1188
1189 if (XFS_IS_REALTIME_INODE(ip)) {
1190 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1191
1192 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1193 if (error)
1194 goto error0;
1195
1196 xfs_bmap_count_leaves(ifp, &rtblks);
1197 }
1198
1199 nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1200 xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1201
1202 /*
1203 * Add the (disk blocks and inode) resources occupied by this
1204 * inode to its dquots. We do this adjustment in the incore dquot,
1205 * and also copy the changes to its buffer.
1206 * We don't care about putting these changes in a transaction
1207 * envelope because if we crash in the middle of a 'quotacheck'
1208 * we have to start from the beginning anyway.
1209 * Once we're done, we'll log all the dquot bufs.
1210 *
1211 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1212 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1213 */
1214 if (XFS_IS_UQUOTA_ON(mp)) {
1215 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1216 rtblks);
1217 if (error)
1218 goto error0;
1219 }
1220
1221 if (XFS_IS_GQUOTA_ON(mp)) {
1222 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1223 rtblks);
1224 if (error)
1225 goto error0;
1226 }
1227
1228 if (XFS_IS_PQUOTA_ON(mp)) {
1229 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1230 rtblks);
1231 if (error)
1232 goto error0;
1233 }
1234
1235error0:
1236 xfs_irele(ip);
1237 return error;
1238}
1239
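/*
 * Flush a single dirty dquot to its backing buffer and queue that buffer
 * on the quotacheck delwri list.  If the dquot is already flush locked,
 * push the backing buffer out of the delwri queue to cycle the flush lock
 * and return -EAGAIN so the dquot walk retries it.
 */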
1240STATIC int
1241xfs_qm_flush_one(
1242 struct xfs_dquot *dqp,
1243 void *data)
1244{
1245 struct xfs_mount *mp = dqp->q_mount;
1246 struct list_head *buffer_list = data;
1247 struct xfs_buf *bp = NULL;
1248 int error = 0;
1249
1250 xfs_dqlock(dqp);
1251 if (dqp->q_flags & XFS_DQFLAG_FREEING)
1252 goto out_unlock;
1253 if (!XFS_DQ_IS_DIRTY(dqp))
1254 goto out_unlock;
1255
1256 /*
1257 * The only way the dquot is already flush locked by the time quotacheck
1258 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1259 * it for the final time. Quotacheck collects all dquot bufs in the
1260 * local delwri queue before dquots are dirtied, so reclaim can't have
1261 * possibly queued it for I/O. The only way out is to push the buffer to
1262 * cycle the flush lock.
1263 */
1264 if (!xfs_dqflock_nowait(dqp)) {
1265 /* buf is pinned in-core by delwri list */
1266 error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1267 mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1268 if (error)
1269 goto out_unlock;
1270
1271 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1272 error = -EAGAIN;
1273 xfs_buf_relse(bp);
1274 goto out_unlock;
1275 }
1276 xfs_buf_unlock(bp);
1277
1278 xfs_buf_delwri_pushbuf(bp, buffer_list);
1279 xfs_buf_rele(bp);
1280
1281 error = -EAGAIN;
1282 goto out_unlock;
1283 }
1284
1285 error = xfs_qm_dqflush(dqp, &bp);
1286 if (error)
1287 goto out_unlock;
1288
1289 xfs_buf_delwri_queue(bp, buffer_list);
1290 xfs_buf_relse(bp);
1291out_unlock:
1292 xfs_dqunlock(dqp);
1293 return error;
1294}
1295
1296/*
1297 * Walk thru all the filesystem inodes and construct a consistent view
1298 * of the disk quota world. If the quotacheck fails, disable quotas.
1299 */
1300STATIC int
1301xfs_qm_quotacheck(
1302 xfs_mount_t *mp)
1303{
1304 int error, error2;
1305 uint flags;
1306 LIST_HEAD (buffer_list);
1307 struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1308 struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1309 struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1310
1311 flags = 0;
1312
1313 ASSERT(uip || gip || pip);
1314 ASSERT(XFS_IS_QUOTA_ON(mp));
1315
1316 xfs_notice(mp, "Quotacheck needed: Please wait.");
1317
1318 /*
1319 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1320 * their counters to zero. We need a clean slate.
1321 * We don't log our changes till later.
1322 */
1323 if (uip) {
1324 error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1325 &buffer_list);
1326 if (error)
1327 goto error_return;
1328 flags |= XFS_UQUOTA_CHKD;
1329 }
1330
1331 if (gip) {
1332 error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1333 &buffer_list);
1334 if (error)
1335 goto error_return;
1336 flags |= XFS_GQUOTA_CHKD;
1337 }
1338
1339 if (pip) {
1340 error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1341 &buffer_list);
1342 if (error)
1343 goto error_return;
1344 flags |= XFS_PQUOTA_CHKD;
1345 }
1346
1347 xfs_set_quotacheck_running(mp);
1348 error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1349 NULL);
1350 xfs_clear_quotacheck_running(mp);
1351
1352 /*
1353 * On error, the inode walk may have partially populated the dquot
1354 * caches. We must purge them before disabling quota and tearing down
1355 * the quotainfo, or else the dquots will leak.
1356 */
1357 if (error)
1358 goto error_purge;
1359
1360 /*
1361 * We've made all the changes that we need to make incore. Flush them
1362 * down to disk buffers if everything was updated successfully.
1363 */
1364 if (XFS_IS_UQUOTA_ON(mp)) {
1365 error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1366 &buffer_list);
1367 }
1368 if (XFS_IS_GQUOTA_ON(mp)) {
1369 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1370 &buffer_list);
1371 if (!error)
1372 error = error2;
1373 }
1374 if (XFS_IS_PQUOTA_ON(mp)) {
1375 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1376 &buffer_list);
1377 if (!error)
1378 error = error2;
1379 }
1380
1381 error2 = xfs_buf_delwri_submit(&buffer_list);
1382 if (!error)
1383 error = error2;
1384
1385 /*
1386 * We can get this error if we couldn't do a dquot allocation inside
1387 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1388 * dirty dquots that might be cached, we just want to get rid of them
1389 * and turn quotaoff. The dquots won't be attached to any of the inodes
1390 * at this point (because we intentionally didn't in dqget_noattach).
1391 */
1392 if (error)
1393 goto error_purge;
1394
1395 /*
1396 * If one type of quotas is off, then it will lose its
1397 * quotachecked status, since we won't be doing accounting for
1398 * that type anymore.
1399 */
1400 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1401 mp->m_qflags |= flags;
1402
1403error_return:
1404 xfs_buf_delwri_cancel(&buffer_list);
1405
1406 if (error) {
1407 xfs_warn(mp,
1408 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1409 error);
1410 /*
1411 * We must turn off quotas.
1412 */
1413 ASSERT(mp->m_quotainfo != NULL);
1414 xfs_qm_destroy_quotainfo(mp);
1415 if (xfs_mount_reset_sbqflags(mp)) {
1416 xfs_warn(mp,
1417 "Quotacheck: Failed to reset quota flags.");
1418 }
1419 xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1420 } else {
1421 xfs_notice(mp, "Quotacheck: Done.");
1422 xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1423 }
1424
1425 return error;
1426
1427error_purge:
1428 /*
1429 * On error, we may have inodes queued for inactivation. This may try
1430 * to attach dquots to the inode before running cleanup operations on
1431 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1432 * below that frees mp->m_quotainfo. To avoid this race, flush all the
1433 * pending inodegc operations before we purge the dquots from memory,
1434 * ensuring that background inactivation is idle whilst we turn off
1435 * quotas.
1436 */
1437 xfs_inodegc_flush(mp);
1438 xfs_qm_dqpurge_all(mp);
1439 goto error_return;
1440
1441}
1442
1443/*
1444 * This is called from xfs_mountfs to start quotas and initialize all
1445 * necessary data structures like quotainfo. This is also responsible for
1446 * running a quotacheck as necessary. We are guaranteed that the superblock
1447 * is consistently read in at this point.
1448 *
1449 * If we fail here, the mount will continue with quota turned off. We don't
1450 * need to indicate success or failure at all.
1451 */
1452void
1453xfs_qm_mount_quotas(
1454 struct xfs_mount *mp)
1455{
1456 int error = 0;
1457 uint sbf;
1458
1459 /*
1460 * If quotas on realtime volumes are not supported, we disable
1461 * quotas immediately.
1462 */
1463 if (mp->m_sb.sb_rextents) {
1464 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1465 mp->m_qflags = 0;
1466 goto write_changes;
1467 }
1468
1469 ASSERT(XFS_IS_QUOTA_ON(mp));
1470
1471 /*
1472 * Allocate the quotainfo structure inside the mount struct, and
1473 * create quotainode(s), and change/rev superblock if necessary.
1474 */
1475 error = xfs_qm_init_quotainfo(mp);
1476 if (error) {
1477 /*
1478 * We must turn off quotas.
1479 */
1480 ASSERT(mp->m_quotainfo == NULL);
1481 mp->m_qflags = 0;
1482 goto write_changes;
1483 }
1484 /*
1485 * If any of the quotas are not consistent, do a quotacheck.
1486 */
1487 if (XFS_QM_NEED_QUOTACHECK(mp)) {
1488 error = xfs_qm_quotacheck(mp);
1489 if (error) {
1490 /* Quotacheck failed and disabled quotas. */
1491 return;
1492 }
1493 }
1494 /*
1495 * If one type of quotas is off, then it will lose its
1496 * quotachecked status, since we won't be doing accounting for
1497 * that type anymore.
1498 */
1499 if (!XFS_IS_UQUOTA_ON(mp))
1500 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1501 if (!XFS_IS_GQUOTA_ON(mp))
1502 mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1503 if (!XFS_IS_PQUOTA_ON(mp))
1504 mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1505
1506 write_changes:
1507 /*
1508 * We actually don't have to acquire the m_sb_lock at all.
1509 * This can only be called from mount, and that's single threaded. XXX
1510 */
1511 spin_lock(&mp->m_sb_lock);
1512 sbf = mp->m_sb.sb_qflags;
1513 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1514 spin_unlock(&mp->m_sb_lock);
1515
1516 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1517 if (xfs_sync_sb(mp, false)) {
1518 /*
1519 * We could only have been turning quotas off.
1520 * We aren't in very good shape actually because
1521 * the incore structures are convinced that quotas are
1522 * off, but the on disk superblock doesn't know that !
1523 */
1524 ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1525 xfs_alert(mp, "%s: Superblock update failed!",
1526 __func__);
1527 }
1528 }
1529
1530 if (error) {
1531 xfs_warn(mp, "Failed to initialize disk quotas.");
1532 return;
1533 }
1534}
1535
1536/*
1537 * This is called after the superblock has been read in and we're ready to
1538 * iget the quota inodes.
1539 */
1540STATIC int
1541xfs_qm_init_quotainos(
1542 xfs_mount_t *mp)
1543{
1544 struct xfs_inode *uip = NULL;
1545 struct xfs_inode *gip = NULL;
1546 struct xfs_inode *pip = NULL;
1547 int error;
1548 uint flags = 0;
1549
1550 ASSERT(mp->m_quotainfo);
1551
1552 /*
1553 * Get the uquota and gquota inodes
1554 */
1555 if (xfs_has_quota(mp)) {
1556 if (XFS_IS_UQUOTA_ON(mp) &&
1557 mp->m_sb.sb_uquotino != NULLFSINO) {
1558 ASSERT(mp->m_sb.sb_uquotino > 0);
1559 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1560 0, 0, &uip);
1561 if (error)
1562 return error;
1563 }
1564 if (XFS_IS_GQUOTA_ON(mp) &&
1565 mp->m_sb.sb_gquotino != NULLFSINO) {
1566 ASSERT(mp->m_sb.sb_gquotino > 0);
1567 error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1568 0, 0, &gip);
1569 if (error)
1570 goto error_rele;
1571 }
1572 if (XFS_IS_PQUOTA_ON(mp) &&
1573 mp->m_sb.sb_pquotino != NULLFSINO) {
1574 ASSERT(mp->m_sb.sb_pquotino > 0);
1575 error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1576 0, 0, &pip);
1577 if (error)
1578 goto error_rele;
1579 }
1580 } else {
1581 flags |= XFS_QMOPT_SBVERSION;
1582 }
1583
1584 /*
1585 * Create the three inodes, if they don't exist already. The changes
1586 * made above will get added to a transaction and logged in one of
1587 * the qino_alloc calls below. If the device is readonly,
1588 * temporarily switch to read-write to do this.
1589 */
1590 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1591 error = xfs_qm_qino_alloc(mp, &uip,
1592 flags | XFS_QMOPT_UQUOTA);
1593 if (error)
1594 goto error_rele;
1595
1596 flags &= ~XFS_QMOPT_SBVERSION;
1597 }
1598 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1599 error = xfs_qm_qino_alloc(mp, &gip,
1600 flags | XFS_QMOPT_GQUOTA);
1601 if (error)
1602 goto error_rele;
1603
1604 flags &= ~XFS_QMOPT_SBVERSION;
1605 }
1606 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1607 error = xfs_qm_qino_alloc(mp, &pip,
1608 flags | XFS_QMOPT_PQUOTA);
1609 if (error)
1610 goto error_rele;
1611 }
1612
1613 mp->m_quotainfo->qi_uquotaip = uip;
1614 mp->m_quotainfo->qi_gquotaip = gip;
1615 mp->m_quotainfo->qi_pquotaip = pip;
1616
1617 return 0;
1618
1619error_rele:
1620 if (uip)
1621 xfs_irele(uip);
1622 if (gip)
1623 xfs_irele(gip);
1624 if (pip)
1625 xfs_irele(pip);
1626 return error;
1627}
1628
1629STATIC void
1630xfs_qm_destroy_quotainos(
1631 struct xfs_quotainfo *qi)
1632{
1633 if (qi->qi_uquotaip) {
1634 xfs_irele(qi->qi_uquotaip);
1635 qi->qi_uquotaip = NULL; /* paranoia */
1636 }
1637 if (qi->qi_gquotaip) {
1638 xfs_irele(qi->qi_gquotaip);
1639 qi->qi_gquotaip = NULL;
1640 }
1641 if (qi->qi_pquotaip) {
1642 xfs_irele(qi->qi_pquotaip);
1643 qi->qi_pquotaip = NULL;
1644 }
1645}
1646
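/*
 * Final teardown of a dquot that the shrinker has isolated: remove it from
 * the radix tree, drop the per-mount dquot count, and destroy it.
 */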
1647STATIC void
1648xfs_qm_dqfree_one(
1649 struct xfs_dquot *dqp)
1650{
1651 struct xfs_mount *mp = dqp->q_mount;
1652 struct xfs_quotainfo *qi = mp->m_quotainfo;
1653
1654 mutex_lock(&qi->qi_tree_lock);
1655 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1656
1657 qi->qi_dquots--;
1658 mutex_unlock(&qi->qi_tree_lock);
1659
1660 xfs_qm_dqdestroy(dqp);
1661}
1662
1663/* --------------- utility functions for vnodeops ---------------- */
1664
1665
1666/*
1667 * Given an inode, a uid, gid and prid make sure that we have
1668 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1669 * quotas by creating this file.
1670 * This also attaches dquot(s) to the given inode after locking it,
1671 * and returns the dquots corresponding to the uid and/or gid.
1672 *
1673 * in : inode (unlocked)
1674 * out : udquot, gdquot with references taken and unlocked
1675 */
1676int
1677xfs_qm_vop_dqalloc(
1678 struct xfs_inode *ip,
1679 kuid_t uid,
1680 kgid_t gid,
1681 prid_t prid,
1682 uint flags,
1683 struct xfs_dquot **O_udqpp,
1684 struct xfs_dquot **O_gdqpp,
1685 struct xfs_dquot **O_pdqpp)
1686{
1687 struct xfs_mount *mp = ip->i_mount;
1688 struct inode *inode = VFS_I(ip);
1689 struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1690 struct xfs_dquot *uq = NULL;
1691 struct xfs_dquot *gq = NULL;
1692 struct xfs_dquot *pq = NULL;
1693 int error;
1694 uint lockflags;
1695
1696 if (!XFS_IS_QUOTA_ON(mp))
1697 return 0;
1698
1699 lockflags = XFS_ILOCK_EXCL;
1700 xfs_ilock(ip, lockflags);
1701
1702 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1703 gid = inode->i_gid;
1704
1705 /*
1706 * Attach the dquot(s) to this inode, doing a dquot allocation
1707 * if necessary. The dquot(s) will not be locked.
1708 */
1709 if (XFS_NOT_DQATTACHED(mp, ip)) {
1710 error = xfs_qm_dqattach_locked(ip, true);
1711 if (error) {
1712 xfs_iunlock(ip, lockflags);
1713 return error;
1714 }
1715 }
1716
1717 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1718 ASSERT(O_udqpp);
1719 if (!uid_eq(inode->i_uid, uid)) {
1720 /*
1721 * What we need is the dquot that has this uid, and
1722 * if we send the inode to dqget, the uid of the inode
1723 * takes priority over what's sent in the uid argument.
1724 * We must unlock inode here before calling dqget if
1725 * we're not sending the inode, because otherwise
1726 * we'll deadlock by doing trans_reserve while
1727 * holding ilock.
1728 */
1729 xfs_iunlock(ip, lockflags);
1730 error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1731 XFS_DQTYPE_USER, true, &uq);
1732 if (error) {
1733 ASSERT(error != -ENOENT);
1734 return error;
1735 }
1736 /*
1737 * Get the ilock in the right order.
1738 */
1739 xfs_dqunlock(uq);
1740 lockflags = XFS_ILOCK_SHARED;
1741 xfs_ilock(ip, lockflags);
1742 } else {
1743 /*
1744 * Take an extra reference, because we'll return
1745 * this to caller
1746 */
1747 ASSERT(ip->i_udquot);
1748 uq = xfs_qm_dqhold(ip->i_udquot);
1749 }
1750 }
1751 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1752 ASSERT(O_gdqpp);
1753 if (!gid_eq(inode->i_gid, gid)) {
1754 xfs_iunlock(ip, lockflags);
1755 error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1756 XFS_DQTYPE_GROUP, true, &gq);
1757 if (error) {
1758 ASSERT(error != -ENOENT);
1759 goto error_rele;
1760 }
1761 xfs_dqunlock(gq);
1762 lockflags = XFS_ILOCK_SHARED;
1763 xfs_ilock(ip, lockflags);
1764 } else {
1765 ASSERT(ip->i_gdquot);
1766 gq = xfs_qm_dqhold(ip->i_gdquot);
1767 }
1768 }
1769 if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1770 ASSERT(O_pdqpp);
1771 if (ip->i_projid != prid) {
1772 xfs_iunlock(ip, lockflags);
1773 error = xfs_qm_dqget(mp, prid,
1774 XFS_DQTYPE_PROJ, true, &pq);
1775 if (error) {
1776 ASSERT(error != -ENOENT);
1777 goto error_rele;
1778 }
1779 xfs_dqunlock(pq);
1780 lockflags = XFS_ILOCK_SHARED;
1781 xfs_ilock(ip, lockflags);
1782 } else {
1783 ASSERT(ip->i_pdquot);
1784 pq = xfs_qm_dqhold(ip->i_pdquot);
1785 }
1786 }
1787 trace_xfs_dquot_dqalloc(ip);
1788
1789 xfs_iunlock(ip, lockflags);
1790 if (O_udqpp)
1791 *O_udqpp = uq;
1792 else
1793 xfs_qm_dqrele(uq);
1794 if (O_gdqpp)
1795 *O_gdqpp = gq;
1796 else
1797 xfs_qm_dqrele(gq);
1798 if (O_pdqpp)
1799 *O_pdqpp = pq;
1800 else
1801 xfs_qm_dqrele(pq);
1802 return 0;
1803
1804error_rele:
1805 xfs_qm_dqrele(gq);
1806 xfs_qm_dqrele(uq);
1807 return error;
1808}
1809
1810/*
1811 * Actually transfer ownership, and do dquot modifications.
1812 * These were already reserved.
1813 */
1814struct xfs_dquot *
1815xfs_qm_vop_chown(
1816 struct xfs_trans *tp,
1817 struct xfs_inode *ip,
1818 struct xfs_dquot **IO_olddq,
1819 struct xfs_dquot *newdq)
1820{
1821 struct xfs_dquot *prevdq;
1822 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1823 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1824
1825
1826 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1827 ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
1828
1829 /* old dquot */
1830 prevdq = *IO_olddq;
1831 ASSERT(prevdq);
1832 ASSERT(prevdq != newdq);
1833
1834 xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_nblocks));
1835 xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1836
1837 /* the sparkling new dquot */
1838 xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_nblocks);
1839 xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1840
1841 /*
1842 * Back when we made quota reservations for the chown, we reserved the
1843 * ondisk blocks + delalloc blocks with the new dquot. Now that we've
1844 * switched the dquots, decrease the new dquot's block reservation
1845 * (having already bumped up the real counter) so that we don't have
1846 * any reservation to give back when we commit.
1847 */
1848 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1849 -ip->i_delayed_blks);
1850
1851 /*
1852 * Give the incore reservation for delalloc blocks back to the old
1853 * dquot. We don't normally handle delalloc quota reservations
1854 * transactionally, so just lock the dquot and subtract from the
1855 * reservation. Dirty the transaction because it's too late to turn
1856 * back now.
1857 */
1858 tp->t_flags |= XFS_TRANS_DIRTY;
1859 xfs_dqlock(prevdq);
1860 ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1861 prevdq->q_blk.reserved -= ip->i_delayed_blks;
1862 xfs_dqunlock(prevdq);
1863
1864 /*
1865 * Take an extra reference, because the inode is going to keep
1866 * this dquot pointer even after the trans_commit.
1867 */
1868 *IO_olddq = xfs_qm_dqhold(newdq);
1869
1870 return prevdq;
1871}
1872
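/*
 * Make sure every distinct inode involved in a rename has its dquots
 * attached before the rename transaction is set up.
 */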
1873int
1874xfs_qm_vop_rename_dqattach(
1875 struct xfs_inode **i_tab)
1876{
1877 struct xfs_mount *mp = i_tab[0]->i_mount;
1878 int i;
1879
1880 if (!XFS_IS_QUOTA_ON(mp))
1881 return 0;
1882
1883 for (i = 0; (i < 4 && i_tab[i]); i++) {
1884 struct xfs_inode *ip = i_tab[i];
1885 int error;
1886
1887 /*
1888 * Watch out for duplicate entries in the table.
1889 */
1890 if (i == 0 || ip != i_tab[i-1]) {
1891 if (XFS_NOT_DQATTACHED(mp, ip)) {
1892 error = xfs_qm_dqattach(ip);
1893 if (error)
1894 return error;
1895 }
1896 }
1897 }
1898 return 0;
1899}
1900
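/*
 * Attach the dquots that were reserved for a newly created inode and
 * charge the new inode against them.
 */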
1901void
1902xfs_qm_vop_create_dqattach(
1903 struct xfs_trans *tp,
1904 struct xfs_inode *ip,
1905 struct xfs_dquot *udqp,
1906 struct xfs_dquot *gdqp,
1907 struct xfs_dquot *pdqp)
1908{
1909 struct xfs_mount *mp = tp->t_mountp;
1910
1911 if (!XFS_IS_QUOTA_ON(mp))
1912 return;
1913
1914 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1915
1916 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1917 ASSERT(ip->i_udquot == NULL);
1918 ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1919
1920 ip->i_udquot = xfs_qm_dqhold(udqp);
1921 }
1922 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1923 ASSERT(ip->i_gdquot == NULL);
1924 ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1925
1926 ip->i_gdquot = xfs_qm_dqhold(gdqp);
1927 }
1928 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1929 ASSERT(ip->i_pdquot == NULL);
1930 ASSERT(ip->i_projid == pdqp->q_id);
1931
1932 ip->i_pdquot = xfs_qm_dqhold(pdqp);
1933 }
1934
1935 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
1936}
1937
1938/* Decide if this inode's dquot is near an enforcement boundary. */
1939bool
1940xfs_inode_near_dquot_enforcement(
1941 struct xfs_inode *ip,
1942 xfs_dqtype_t type)
1943{
1944 struct xfs_dquot *dqp;
1945 int64_t freesp;
1946
1947 /* We only care for quotas that are enabled and enforced. */
1948 dqp = xfs_inode_dquot(ip, type);
1949 if (!dqp || !xfs_dquot_is_enforced(dqp))
1950 return false;
1951
1952 if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1953 xfs_dquot_res_over_limits(&dqp->q_rtb))
1954 return true;
1955
1956 /* For space on the data device, check the various thresholds. */
1957 if (!dqp->q_prealloc_hi_wmark)
1958 return false;
1959
1960 if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1961 return false;
1962
1963 if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1964 return true;
1965
1966 freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1967 if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1968 return true;
1969
1970 return false;
1971}
1972
