1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * vfsv0 quota IO operations on file |
4 | */ |
5 | |
6 | #include <linux/errno.h> |
7 | #include <linux/fs.h> |
8 | #include <linux/mount.h> |
9 | #include <linux/dqblk_v2.h> |
10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> |
12 | #include <linux/module.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/quotaops.h> |
15 | |
16 | #include <asm/byteorder.h> |
17 | |
18 | #include "quota_tree.h" |
19 | |
MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");
23 | |
24 | /* |
25 | * Maximum quota tree depth we support. Only to limit recursion when working |
26 | * with the tree. |
27 | */ |
28 | #define MAX_QTREE_DEPTH 6 |
29 | |
30 | #define __QUOTA_QT_PARANOIA |
31 | |
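/* Index of the reference to follow for 'id' inside a tree block at the given depth */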
32 | static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) |
33 | { |
34 | unsigned int epb = info->dqi_usable_bs >> 2; |
35 | |
36 | depth = info->dqi_qtree_depth - depth - 1; |
37 | while (depth--) |
38 | id /= epb; |
39 | return id % epb; |
40 | } |
41 | |
42 | static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth) |
43 | { |
	qid_t id = from_kqid(&init_user_ns, qid);
45 | |
46 | return __get_index(info, id, depth); |
47 | } |
48 | |
/* Number of entries in one block */
50 | static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) |
51 | { |
52 | return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) |
53 | / info->dqi_entry_size; |
54 | } |
55 | |
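/* Read one block of the quota file into 'buf' (zero-filled in case of a short read) */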
56 | static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) |
57 | { |
58 | struct super_block *sb = info->dqi_sb; |
59 | |
60 | memset(buf, 0, info->dqi_usable_bs); |
61 | return sb->s_op->quota_read(sb, info->dqi_type, buf, |
62 | info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); |
63 | } |
64 | |
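/* Write one block of the quota file; a short write is reported and returned as -EIO */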
65 | static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) |
66 | { |
67 | struct super_block *sb = info->dqi_sb; |
68 | ssize_t ret; |
69 | |
70 | ret = sb->s_op->quota_write(sb, info->dqi_type, buf, |
71 | info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); |
72 | if (ret != info->dqi_usable_bs) { |
		quota_error(sb, "dquota write failed");
74 | if (ret >= 0) |
75 | ret = -EIO; |
76 | } |
77 | return ret; |
78 | } |
79 | |
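/* Check that a value read from disk falls within the expected range */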
80 | static inline int do_check_range(struct super_block *sb, const char *val_name, |
81 | uint val, uint min_val, uint max_val) |
82 | { |
83 | if (val < min_val || val > max_val) { |
		quota_error(sb, "Getting %s %u out of range %u-%u",
85 | val_name, val, min_val, max_val); |
86 | return -EUCLEAN; |
87 | } |
88 | |
89 | return 0; |
90 | } |
91 | |
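/* Sanity-check the free-list pointers and entry count of a data block header */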
static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
				    struct qt_disk_dqdbheader *dh)
94 | { |
95 | int err = 0; |
96 | |
	err = do_check_range(info->dqi_sb, "dqdh_next_free",
			     le32_to_cpu(dh->dqdh_next_free), 0,
			     info->dqi_blocks - 1);
	if (err)
		return err;
	err = do_check_range(info->dqi_sb, "dqdh_prev_free",
			     le32_to_cpu(dh->dqdh_prev_free), 0,
			     info->dqi_blocks - 1);
	if (err)
		return err;
	err = do_check_range(info->dqi_sb, "dqdh_entries",
			     le16_to_cpu(dh->dqdh_entries), 0,
			     qtree_dqstr_in_blk(info));
110 | |
111 | return err; |
112 | } |
113 | |
114 | /* Remove empty block from list and return it */ |
115 | static int get_free_dqblk(struct qtree_mem_dqinfo *info) |
116 | { |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
118 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; |
119 | int ret, blk; |
120 | |
121 | if (!buf) |
122 | return -ENOMEM; |
123 | if (info->dqi_free_blk) { |
124 | blk = info->dqi_free_blk; |
125 | ret = read_blk(info, blk, buf); |
126 | if (ret < 0) |
127 | goto out_buf; |
128 | ret = check_dquot_block_header(info, dh); |
129 | if (ret) |
130 | goto out_buf; |
131 | info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); |
132 | } |
133 | else { |
134 | memset(buf, 0, info->dqi_usable_bs); |
135 | /* Assure block allocation... */ |
		ret = write_blk(info, info->dqi_blocks, buf);
137 | if (ret < 0) |
138 | goto out_buf; |
139 | blk = info->dqi_blocks++; |
140 | } |
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
145 | return ret; |
146 | } |
147 | |
148 | /* Insert empty block to the list */ |
149 | static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) |
150 | { |
151 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; |
152 | int err; |
153 | |
154 | dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); |
155 | dh->dqdh_prev_free = cpu_to_le32(0); |
156 | dh->dqdh_entries = cpu_to_le16(0); |
157 | err = write_blk(info, blk, buf); |
158 | if (err < 0) |
159 | return err; |
160 | info->dqi_free_blk = blk; |
	mark_info_dirty(info->dqi_sb, info->dqi_type);
162 | return 0; |
163 | } |
164 | |
165 | /* Remove given block from the list of blocks with free entries */ |
166 | static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, |
167 | uint blk) |
168 | { |
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
170 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; |
171 | uint nextblk = le32_to_cpu(dh->dqdh_next_free); |
172 | uint prevblk = le32_to_cpu(dh->dqdh_prev_free); |
173 | int err; |
174 | |
175 | if (!tmpbuf) |
176 | return -ENOMEM; |
177 | if (nextblk) { |
		err = read_blk(info, nextblk, tmpbuf);
179 | if (err < 0) |
180 | goto out_buf; |
181 | ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = |
182 | dh->dqdh_prev_free; |
		err = write_blk(info, nextblk, tmpbuf);
184 | if (err < 0) |
185 | goto out_buf; |
186 | } |
187 | if (prevblk) { |
		err = read_blk(info, prevblk, tmpbuf);
189 | if (err < 0) |
190 | goto out_buf; |
191 | ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = |
192 | dh->dqdh_next_free; |
		err = write_blk(info, prevblk, tmpbuf);
194 | if (err < 0) |
195 | goto out_buf; |
196 | } else { |
197 | info->dqi_free_entry = nextblk; |
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
201 | dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); |
202 | /* No matter whether write succeeds block is out of list */ |
203 | if (write_blk(info, blk, buf) < 0) |
204 | quota_error(info->dqi_sb, "Can't write block (%u) " |
205 | "with free entries" , blk); |
206 | return 0; |
207 | out_buf: |
	kfree(tmpbuf);
209 | return err; |
210 | } |
211 | |
212 | /* Insert given block to the beginning of list with free entries */ |
213 | static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, |
214 | uint blk) |
215 | { |
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
217 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; |
218 | int err; |
219 | |
220 | if (!tmpbuf) |
221 | return -ENOMEM; |
222 | dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); |
223 | dh->dqdh_prev_free = cpu_to_le32(0); |
224 | err = write_blk(info, blk, buf); |
225 | if (err < 0) |
226 | goto out_buf; |
227 | if (info->dqi_free_entry) { |
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
229 | if (err < 0) |
230 | goto out_buf; |
231 | ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = |
232 | cpu_to_le32(blk); |
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
234 | if (err < 0) |
235 | goto out_buf; |
236 | } |
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
240 | return 0; |
241 | out_buf: |
	kfree(tmpbuf);
243 | return err; |
244 | } |
245 | |
246 | /* Is the entry in the block free? */ |
247 | int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk) |
248 | { |
249 | int i; |
250 | |
251 | for (i = 0; i < info->dqi_entry_size; i++) |
252 | if (disk[i]) |
253 | return 0; |
254 | return 1; |
255 | } |
256 | EXPORT_SYMBOL(qtree_entry_unused); |
257 | |
258 | /* Find space for dquot */ |
259 | static uint find_free_dqentry(struct qtree_mem_dqinfo *info, |
260 | struct dquot *dquot, int *err) |
261 | { |
262 | uint blk, i; |
263 | struct qt_disk_dqdbheader *dh; |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
265 | char *ddquot; |
266 | |
267 | *err = 0; |
268 | if (!buf) { |
269 | *err = -ENOMEM; |
270 | return 0; |
271 | } |
272 | dh = (struct qt_disk_dqdbheader *)buf; |
273 | if (info->dqi_free_entry) { |
274 | blk = info->dqi_free_entry; |
275 | *err = read_blk(info, blk, buf); |
276 | if (*err < 0) |
277 | goto out_buf; |
278 | *err = check_dquot_block_header(info, dh); |
279 | if (*err) |
280 | goto out_buf; |
281 | } else { |
282 | blk = get_free_dqblk(info); |
283 | if ((int)blk < 0) { |
284 | *err = blk; |
			kfree(buf);
286 | return 0; |
287 | } |
288 | memset(buf, 0, info->dqi_usable_bs); |
289 | /* This is enough as the block is already zeroed and the entry |
290 | * list is empty... */ |
291 | info->dqi_free_entry = blk; |
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
293 | } |
294 | /* Block will be full? */ |
295 | if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { |
296 | *err = remove_free_dqentry(info, buf, blk); |
297 | if (*err < 0) { |
298 | quota_error(dquot->dq_sb, "Can't remove block (%u) " |
299 | "from entry free list" , blk); |
300 | goto out_buf; |
301 | } |
302 | } |
	le16_add_cpu(&dh->dqdh_entries, 1);
304 | /* Find free structure in block */ |
305 | ddquot = buf + sizeof(struct qt_disk_dqdbheader); |
306 | for (i = 0; i < qtree_dqstr_in_blk(info); i++) { |
307 | if (qtree_entry_unused(info, ddquot)) |
308 | break; |
309 | ddquot += info->dqi_entry_size; |
310 | } |
311 | #ifdef __QUOTA_QT_PARANOIA |
312 | if (i == qtree_dqstr_in_blk(info)) { |
313 | quota_error(dquot->dq_sb, "Data block full but it shouldn't" ); |
314 | *err = -EIO; |
315 | goto out_buf; |
316 | } |
317 | #endif |
318 | *err = write_blk(info, blk, buf); |
319 | if (*err < 0) { |
320 | quota_error(dquot->dq_sb, "Can't write quota data block %u" , |
321 | blk); |
322 | goto out_buf; |
323 | } |
324 | dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + |
325 | sizeof(struct qt_disk_dqdbheader) + |
326 | i * info->dqi_entry_size; |
	kfree(buf);
328 | return blk; |
329 | out_buf: |
	kfree(buf);
331 | return 0; |
332 | } |
333 | |
334 | /* Insert reference to structure into the trie */ |
335 | static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, |
336 | uint *blks, int depth) |
337 | { |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
339 | int ret = 0, newson = 0, newact = 0; |
340 | __le32 *ref; |
341 | uint newblk; |
342 | int i; |
343 | |
344 | if (!buf) |
345 | return -ENOMEM; |
346 | if (!blks[depth]) { |
347 | ret = get_free_dqblk(info); |
348 | if (ret < 0) |
349 | goto out_buf; |
350 | for (i = 0; i < depth; i++) |
351 | if (ret == blks[i]) { |
352 | quota_error(dquot->dq_sb, |
353 | "Free block already used in tree: block %u" , |
354 | ret); |
355 | ret = -EIO; |
356 | goto out_buf; |
357 | } |
358 | blks[depth] = ret; |
359 | memset(buf, 0, info->dqi_usable_bs); |
360 | newact = 1; |
361 | } else { |
		ret = read_blk(info, blks[depth], buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", blks[depth]);
366 | goto out_buf; |
367 | } |
368 | } |
369 | ref = (__le32 *)buf; |
370 | newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); |
	ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
			     info->dqi_blocks - 1);
373 | if (ret) |
374 | goto out_buf; |
375 | if (!newblk) { |
376 | newson = 1; |
377 | } else { |
378 | for (i = 0; i <= depth; i++) |
379 | if (newblk == blks[i]) { |
380 | quota_error(dquot->dq_sb, |
381 | "Cycle in quota tree detected: block %u index %u" , |
382 | blks[depth], |
383 | get_index(info, dquot->dq_id, depth)); |
384 | ret = -EIO; |
385 | goto out_buf; |
386 | } |
387 | } |
388 | blks[depth + 1] = newblk; |
389 | if (depth == info->dqi_qtree_depth - 1) { |
390 | #ifdef __QUOTA_QT_PARANOIA |
391 | if (newblk) { |
392 | quota_error(dquot->dq_sb, "Inserting already present " |
393 | "quota entry (block %u)" , |
394 | le32_to_cpu(ref[get_index(info, |
395 | dquot->dq_id, depth)])); |
396 | ret = -EIO; |
397 | goto out_buf; |
398 | } |
399 | #endif |
		blks[depth + 1] = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, blks, depth + 1);
	}
	if (newson && ret >= 0) {
		ref[get_index(info, dquot->dq_id, depth)] =
						cpu_to_le32(blks[depth + 1]);
		ret = write_blk(info, blks[depth], buf);
	} else if (newact && ret < 0) {
		put_free_dqblk(info, buf, blks[depth]);
	}
out_buf:
	kfree(buf);
413 | return ret; |
414 | } |
415 | |
416 | /* Wrapper for inserting quota structure into tree */ |
417 | static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, |
418 | struct dquot *dquot) |
419 | { |
420 | uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; |
421 | |
422 | #ifdef __QUOTA_QT_PARANOIA |
423 | if (info->dqi_blocks <= QT_TREEOFF) { |
424 | quota_error(dquot->dq_sb, "Quota tree root isn't allocated!" ); |
425 | return -EIO; |
426 | } |
427 | #endif |
428 | if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { |
429 | quota_error(dquot->dq_sb, "Quota tree depth too big!" ); |
430 | return -EIO; |
431 | } |
	return do_insert_tree(info, dquot, blks, 0);
433 | } |
434 | |
435 | /* |
436 | * We don't have to be afraid of deadlocks as we never have quotas on quota |
437 | * files... |
438 | */ |
439 | int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) |
440 | { |
441 | int type = dquot->dq_id.type; |
442 | struct super_block *sb = dquot->dq_sb; |
443 | ssize_t ret; |
	char *ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
445 | |
446 | if (!ddquot) |
447 | return -ENOMEM; |
448 | |
449 | /* dq_off is guarded by dqio_sem */ |
450 | if (!dquot->dq_off) { |
451 | ret = dq_insert_tree(info, dquot); |
452 | if (ret < 0) { |
453 | quota_error(sb, "Error %zd occurred while creating " |
454 | "quota" , ret); |
455 | kfree(objp: ddquot); |
456 | return ret; |
457 | } |
458 | } |
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
462 | ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size, |
463 | dquot->dq_off); |
464 | if (ret != info->dqi_entry_size) { |
465 | quota_error(sb, "dquota write failed" ); |
466 | if (ret >= 0) |
467 | ret = -ENOSPC; |
468 | } else { |
469 | ret = 0; |
470 | } |
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);
473 | |
474 | return ret; |
475 | } |
476 | EXPORT_SYMBOL(qtree_write_dquot); |
477 | |
478 | /* Free dquot entry in data block */ |
479 | static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, |
480 | uint blk) |
481 | { |
482 | struct qt_disk_dqdbheader *dh; |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
484 | int ret = 0; |
485 | |
486 | if (!buf) |
487 | return -ENOMEM; |
488 | if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { |
489 | quota_error(dquot->dq_sb, "Quota structure has offset to " |
490 | "other block (%u) than it should (%u)" , blk, |
491 | (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); |
492 | ret = -EIO; |
493 | goto out_buf; |
494 | } |
495 | ret = read_blk(info, blk, buf); |
496 | if (ret < 0) { |
497 | quota_error(dquot->dq_sb, "Can't read quota data block %u" , |
498 | blk); |
499 | goto out_buf; |
500 | } |
501 | dh = (struct qt_disk_dqdbheader *)buf; |
502 | ret = check_dquot_block_header(info, dh); |
503 | if (ret) |
504 | goto out_buf; |
	le16_add_cpu(&dh->dqdh_entries, -1);
506 | if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ |
507 | ret = remove_free_dqentry(info, buf, blk); |
508 | if (ret >= 0) |
509 | ret = put_free_dqblk(info, buf, blk); |
510 | if (ret < 0) { |
511 | quota_error(dquot->dq_sb, "Can't move quota data block " |
512 | "(%u) to free list" , blk); |
513 | goto out_buf; |
514 | } |
515 | } else { |
516 | memset(buf + |
517 | (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), |
518 | 0, info->dqi_entry_size); |
519 | if (le16_to_cpu(dh->dqdh_entries) == |
520 | qtree_dqstr_in_blk(info) - 1) { |
521 | /* Insert will write block itself */ |
522 | ret = insert_free_dqentry(info, buf, blk); |
523 | if (ret < 0) { |
524 | quota_error(dquot->dq_sb, "Can't insert quota " |
525 | "data block (%u) to free entry list" , blk); |
526 | goto out_buf; |
527 | } |
528 | } else { |
529 | ret = write_blk(info, blk, buf); |
530 | if (ret < 0) { |
531 | quota_error(dquot->dq_sb, "Can't write quota " |
532 | "data block %u" , blk); |
533 | goto out_buf; |
534 | } |
535 | } |
536 | } |
537 | dquot->dq_off = 0; /* Quota is now unattached */ |
538 | out_buf: |
	kfree(buf);
540 | return ret; |
541 | } |
542 | |
543 | /* Remove reference to dquot from tree */ |
544 | static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, |
545 | uint *blks, int depth) |
546 | { |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
548 | int ret = 0; |
549 | uint newblk; |
550 | __le32 *ref = (__le32 *)buf; |
551 | int i; |
552 | |
553 | if (!buf) |
554 | return -ENOMEM; |
	ret = read_blk(info, blks[depth], buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
558 | blks[depth]); |
559 | goto out_buf; |
560 | } |
561 | newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); |
	ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
			     info->dqi_blocks - 1);
564 | if (ret) |
565 | goto out_buf; |
566 | |
567 | for (i = 0; i <= depth; i++) |
568 | if (newblk == blks[i]) { |
569 | quota_error(dquot->dq_sb, |
570 | "Cycle in quota tree detected: block %u index %u" , |
571 | blks[depth], |
572 | get_index(info, dquot->dq_id, depth)); |
573 | ret = -EIO; |
574 | goto out_buf; |
575 | } |
576 | if (depth == info->dqi_qtree_depth - 1) { |
		ret = free_dqentry(info, dquot, newblk);
578 | blks[depth + 1] = 0; |
579 | } else { |
580 | blks[depth + 1] = newblk; |
		ret = remove_tree(info, dquot, blks, depth + 1);
582 | } |
583 | if (ret >= 0 && !blks[depth + 1]) { |
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
585 | /* Block got empty? */ |
586 | for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) |
587 | ; |
588 | /* Don't put the root block into the free block list */ |
589 | if (i == (info->dqi_usable_bs >> 2) |
590 | && blks[depth] != QT_TREEOFF) { |
			put_free_dqblk(info, buf, blks[depth]);
592 | blks[depth] = 0; |
593 | } else { |
			ret = write_blk(info, blks[depth], buf);
595 | if (ret < 0) |
596 | quota_error(dquot->dq_sb, |
597 | "Can't write quota tree block %u" , |
598 | blks[depth]); |
599 | } |
600 | } |
601 | out_buf: |
	kfree(buf);
603 | return ret; |
604 | } |
605 | |
606 | /* Delete dquot from tree */ |
607 | int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) |
608 | { |
609 | uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; |
610 | |
611 | if (!dquot->dq_off) /* Even not allocated? */ |
612 | return 0; |
613 | if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { |
614 | quota_error(dquot->dq_sb, "Quota tree depth too big!" ); |
615 | return -EIO; |
616 | } |
	return remove_tree(info, dquot, blks, 0);
618 | } |
619 | EXPORT_SYMBOL(qtree_delete_dquot); |
620 | |
621 | /* Find entry in block */ |
622 | static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, |
623 | struct dquot *dquot, uint blk) |
624 | { |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
626 | loff_t ret = 0; |
627 | int i; |
628 | char *ddquot; |
629 | |
630 | if (!buf) |
631 | return -ENOMEM; |
632 | ret = read_blk(info, blk, buf); |
633 | if (ret < 0) { |
634 | quota_error(dquot->dq_sb, "Can't read quota tree " |
635 | "block %u" , blk); |
636 | goto out_buf; |
637 | } |
638 | ddquot = buf + sizeof(struct qt_disk_dqdbheader); |
639 | for (i = 0; i < qtree_dqstr_in_blk(info); i++) { |
640 | if (info->dqi_ops->is_id(ddquot, dquot)) |
641 | break; |
642 | ddquot += info->dqi_entry_size; |
643 | } |
644 | if (i == qtree_dqstr_in_blk(info)) { |
645 | quota_error(dquot->dq_sb, |
646 | "Quota for id %u referenced but not present" , |
647 | from_kqid(&init_user_ns, dquot->dq_id)); |
648 | ret = -EIO; |
649 | goto out_buf; |
650 | } else { |
651 | ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct |
652 | qt_disk_dqdbheader) + i * info->dqi_entry_size; |
653 | } |
654 | out_buf: |
	kfree(buf);
656 | return ret; |
657 | } |
658 | |
659 | /* Find entry for given id in the tree */ |
660 | static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, |
661 | struct dquot *dquot, uint *blks, int depth) |
662 | { |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
664 | loff_t ret = 0; |
665 | __le32 *ref = (__le32 *)buf; |
666 | uint blk; |
667 | int i; |
668 | |
669 | if (!buf) |
670 | return -ENOMEM; |
	ret = read_blk(info, blks[depth], buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
674 | blks[depth]); |
675 | goto out_buf; |
676 | } |
677 | ret = 0; |
678 | blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); |
679 | if (!blk) /* No reference? */ |
680 | goto out_buf; |
	ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF,
			     info->dqi_blocks - 1);
683 | if (ret) |
684 | goto out_buf; |
685 | |
686 | /* Check for cycles in the tree */ |
687 | for (i = 0; i <= depth; i++) |
688 | if (blk == blks[i]) { |
689 | quota_error(dquot->dq_sb, |
690 | "Cycle in quota tree detected: block %u index %u" , |
691 | blks[depth], |
692 | get_index(info, dquot->dq_id, depth)); |
693 | ret = -EIO; |
694 | goto out_buf; |
695 | } |
696 | blks[depth + 1] = blk; |
697 | if (depth < info->dqi_qtree_depth - 1) |
		ret = find_tree_dqentry(info, dquot, blks, depth + 1);
699 | else |
700 | ret = find_block_dqentry(info, dquot, blk); |
701 | out_buf: |
	kfree(buf);
703 | return ret; |
704 | } |
705 | |
706 | /* Find entry for given id in the tree - wrapper function */ |
707 | static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, |
708 | struct dquot *dquot) |
709 | { |
710 | uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; |
711 | |
712 | if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { |
713 | quota_error(dquot->dq_sb, "Quota tree depth too big!" ); |
714 | return -EIO; |
715 | } |
	return find_tree_dqentry(info, dquot, blks, 0);
717 | } |
718 | |
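/* Read dquot from the quota file, looking up its offset in the tree if not yet known */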
719 | int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) |
720 | { |
721 | int type = dquot->dq_id.type; |
722 | struct super_block *sb = dquot->dq_sb; |
723 | loff_t offset; |
724 | char *ddquot; |
725 | int ret = 0; |
726 | |
727 | #ifdef __QUOTA_QT_PARANOIA |
728 | /* Invalidated quota? */ |
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
731 | return -EIO; |
732 | } |
733 | #endif |
734 | /* Do we know offset of the dquot entry in the quota file? */ |
735 | if (!dquot->dq_off) { |
736 | offset = find_dqentry(info, dquot); |
737 | if (offset <= 0) { /* Entry not present? */ |
738 | if (offset < 0) |
739 | quota_error(sb,"Can't read quota structure " |
740 | "for id %u" , |
741 | from_kqid(&init_user_ns, |
742 | dquot->dq_id)); |
743 | dquot->dq_off = 0; |
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
745 | memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); |
746 | ret = offset; |
747 | goto out; |
748 | } |
749 | dquot->dq_off = offset; |
750 | } |
	ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
752 | if (!ddquot) |
753 | return -ENOMEM; |
754 | ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size, |
755 | dquot->dq_off); |
756 | if (ret != info->dqi_entry_size) { |
757 | if (ret >= 0) |
758 | ret = -EIO; |
759 | quota_error(sb, "Error while reading quota structure for id %u" , |
760 | from_kqid(&init_user_ns, dquot->dq_id)); |
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
764 | goto out; |
765 | } |
	spin_lock(&dquot->dq_dqb_lock);
767 | info->dqi_ops->disk2mem_dqblk(dquot, ddquot); |
768 | if (!dquot->dq_dqb.dqb_bhardlimit && |
769 | !dquot->dq_dqb.dqb_bsoftlimit && |
770 | !dquot->dq_dqb.dqb_ihardlimit && |
771 | !dquot->dq_dqb.dqb_isoftlimit) |
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
775 | out: |
	dqstats_inc(DQST_READS);
777 | return ret; |
778 | } |
779 | EXPORT_SYMBOL(qtree_read_dquot); |
780 | |
781 | /* Check whether dquot should not be deleted. We know we are |
782 | * the only one operating on dquot (thanks to dq_lock) */ |
783 | int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) |
784 | { |
785 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && |
786 | !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) |
787 | return qtree_delete_dquot(info, dquot); |
788 | return 0; |
789 | } |
790 | EXPORT_SYMBOL(qtree_release_dquot); |
791 | |
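/* Advance *id to the next id with an allocated block in the subtree rooted at 'blk';
 * returns -ENOENT if there is none */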
792 | static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id, |
793 | unsigned int blk, int depth) |
794 | { |
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
796 | __le32 *ref = (__le32 *)buf; |
797 | ssize_t ret; |
798 | unsigned int epb = info->dqi_usable_bs >> 2; |
799 | unsigned int level_inc = 1; |
800 | int i; |
801 | |
802 | if (!buf) |
803 | return -ENOMEM; |
804 | |
805 | for (i = depth; i < info->dqi_qtree_depth - 1; i++) |
806 | level_inc *= epb; |
807 | |
808 | ret = read_blk(info, blk, buf); |
809 | if (ret < 0) { |
810 | quota_error(info->dqi_sb, |
811 | "Can't read quota tree block %u" , blk); |
812 | goto out_buf; |
813 | } |
	for (i = __get_index(info, *id, depth); i < epb; i++) {
815 | uint blk_no = le32_to_cpu(ref[i]); |
816 | |
817 | if (blk_no == 0) { |
818 | *id += level_inc; |
819 | continue; |
820 | } |
		ret = do_check_range(info->dqi_sb, "block", blk_no, 0,
				     info->dqi_blocks - 1);
823 | if (ret) |
824 | goto out_buf; |
825 | if (depth == info->dqi_qtree_depth - 1) { |
826 | ret = 0; |
827 | goto out_buf; |
828 | } |
		ret = find_next_id(info, id, blk_no, depth + 1);
830 | if (ret != -ENOENT) |
831 | break; |
832 | } |
833 | if (i == epb) { |
834 | ret = -ENOENT; |
835 | goto out_buf; |
836 | } |
837 | out_buf: |
	kfree(buf);
839 | return ret; |
840 | } |
841 | |
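/* Return in *qid the next id present in the quota file, at or after *qid */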
842 | int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid) |
843 | { |
	qid_t id = from_kqid(&init_user_ns, *qid);
845 | int ret; |
846 | |
	ret = find_next_id(info, &id, QT_TREEOFF, 0);
848 | if (ret < 0) |
849 | return ret; |
	*qid = make_kqid(&init_user_ns, qid->type, id);
851 | return 0; |
852 | } |
853 | EXPORT_SYMBOL(qtree_get_next_id); |
854 | |