// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and provide extent-level locking.
 * The delay extent tree is the first step to achieve this goal. It was
 * originally built by Yongqiang Yang. At that time it was called the
 * delay extent tree, whose goal was only to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support. That is why it is still
 * called the delay extent tree in the first commit. But to better
 * describe what it does, it has been renamed to the extent status tree.
 *
 * Step1:
 * Currently the first step has been done. All delayed extents are
 * tracked in the tree. The tree maintains a delayed extent from when a
 * delayed allocation is issued until the delayed extent is written out
 * or invalidated. Therefore the implementations of fiemap and bigalloc
 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree. Hence, the single extent cache can be
 * removed because the extent status tree can do a better job. Extents
 * in the status tree are loaded on demand. Therefore, the extent status
 * tree may not contain all of the extents in a file. Meanwhile we define
 * a shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much memory.
 * Written/unwritten/hole extents in the tree will be reclaimed by this
 * shrinker when we are under high memory pressure. Delayed extents will
 * not be reclaimed because fiemap, bigalloc, and seek_data/hole need
 * them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies: the code is
 * complicated, buggy, and inefficient.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know whether
 * a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 * -- FIEMAP
 *	FIEMAP looks up the page cache to distinguish delayed allocations
 *	from holes.
 *
 * -- SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * -- bigalloc
 *	bigalloc looks up the page cache to figure out whether a block is
 *	already under delayed allocation or not, to determine whether a
 *	quota reservation is needed for the cluster.
 *
 * -- writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped. Even if there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (i.e., belongs to a delayed extent)
 * or not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * -- extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically. Unlike an extent in the extent tree, this extent is an
 *	in-memory struct; there is no corresponding on-disk data. There is
 *	no limit on the length of an extent, so an extent can contain as
 *	many blocks as are contiguous logically and physically.
 *
 * -- extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with their status. The extents in the
 *	tree are ordered by logical block number.
 *
 * -- operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 * -- race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * -- memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory. Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * -- overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding-space operations take O(1) time.
 *
 * -- gain
 *	1. Code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
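
/*
 * For reference, the node this tree stores (struct extent_status, defined
 * in extents_status.h) looks roughly like the sketch below; the exact
 * layout lives in the header, but every field is used throughout this
 * file. The extent status bits are packed into the high bits of es_pblk
 * (see the BUILD_BUG_ON(ES_SHIFT < 48) in ext4_es_register_shrinker()):
 *
 *	struct extent_status {
 *		struct rb_node rb_node;		(node in the per-inode tree)
 *		ext4_lblk_t es_lblk;		(first logical block covered)
 *		ext4_lblk_t es_len;		(length of extent in blocks)
 *		ext4_fsblk_t es_pblk;		(first physical block + status)
 *	};
 */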

static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			    ext4_lblk_t len,
			    struct pending_reservation **prealloc);

int __init ext4_init_es(void)
{
	ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;
		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for an extent with a given offset. If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}

/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn. If a match
 * is found, it's returned in @es. If not, and a matching extent is found
 * beyond the block range, it's returned in @es. If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u) %llu %x\n",
			 lblk, es1->es_lblk, es1->es_len,
			 ext4_es_pblock(es1), ext4_es_status(es1));
		goto out;
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		WRITE_ONCE(tree->cache_es, es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}
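
/*
 * Example (an illustrative sketch, not taken from a real caller): code
 * such as the SEEK_DATA/SEEK_HOLE path can ask for the first extent in a
 * range that carries data by passing a matching function. es_is_data()
 * below is a hypothetical helper; the ext4_es_is_*() predicates come
 * from extents_status.h:
 *
 *	static int es_is_data(struct extent_status *es)
 *	{
 *		return ext4_es_is_written(es) || ext4_es_is_unwritten(es) ||
 *		       ext4_es_is_delayed(es);
 *	}
 *
 *	struct extent_status es;
 *
 *	ext4_es_find_extent_range(inode, &es_is_data, start, end, &es);
 *	if (es.es_len == 0)
 *		...no matching extent at or beyond start...
 */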

/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not. If at least
 * one extent has the specified status, then there is at least one block
 * in the cluster with that status. Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false; /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}
/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}
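
/*
 * Example (a minimal sketch): checking whether any block in [lblk, end]
 * is still under delayed allocation, using the ext4_es_is_delayed()
 * predicate from extents_status.h:
 *
 *	if (ext4_es_scan_range(inode, &ext4_es_is_delayed, lblk, end))
 *		...at least one delayed block in the range...
 */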

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not. If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status. Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}
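
/*
 * Worked example with assumed geometry: on a bigalloc filesystem with a
 * cluster ratio of 16 (s_cluster_bits == 4), a query for lblk 37 scans
 * the whole cluster containing it:
 *
 *	lblk_start = EXT4_LBLK_CMASK(sbi, 37) = 32
 *	lblk_end   = 32 + 16 - 1              = 47
 *
 * so ext4_es_scan_clu(inode, &ext4_es_is_delayed, 37) reports whether
 * any block in [32, 47] is delayed.
 */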

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static inline struct pending_reservation *__alloc_pending(bool nofail)
{
	if (!nofail)
		return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);

	return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
}

static inline void __free_pending(struct pending_reservation *pr)
{
	kmem_cache_free(ext4_pending_cachep, pr);
}

/*
 * Returns true if we cannot fail to allocate memory for this extent_status
 * entry and cannot reclaim it until its status changes.
 */
static inline bool ext4_es_must_keep(struct extent_status *es)
{
	/* fiemap, bigalloc, and seek_data/hole need to use it. */
	if (ext4_es_is_delayed(es))
		return true;

	return false;
}

static inline struct extent_status *__es_alloc_extent(bool nofail)
{
	if (!nofail)
		return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);

	return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
}

static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
		ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
{
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/* We never try to reclaim a must kept extent, so we don't count it. */
	if (!ext4_es_must_keep(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
}

static inline void __es_free_extent(struct extent_status *es)
{
	kmem_cache_free(ext4_es_cachep, es);
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when we can reclaim the extent. */
	if (!ext4_es_must_keep(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	__es_free_extent(es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
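
/*
 * Worked example of the merge conditions: [0/10) written at pblk 100
 * and [10/5) written at pblk 110 can be merged into [0/15) at pblk 100
 * (same status, logically and physically contiguous), whereas [0/10) at
 * pblk 100 followed by [10/5) at pblk 200 cannot, because the physical
 * blocks are not contiguous.
 */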

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk. So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_free_ext_path(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we can't access the direct/indirect tree from outside it, and
	 * it would be too ugly to define this function in indirect.c.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * an indirect-based file doesn't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because
	 * the caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because there is no overlap.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	if (prealloc)
		es = prealloc;
	else
		es = __es_alloc_extent(false);
	if (!es)
		return -ENOMEM;
	ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
			    newes->es_pblk);

	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 */
void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			   ext4_lblk_t len, ext4_fsblk_t pblk,
			   unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err1 = 0, err2 = 0, err3 = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct extent_status *es1 = NULL;
	struct extent_status *es2 = NULL;
	struct pending_reservation *pr = NULL;
	bool revise_pending = false;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				" delayed and written which can potentially "
				" cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	revise_pending = sbi->s_cluster_ratio > 1 &&
			 test_opt(inode->i_sb, DELALLOC) &&
			 (status & (EXTENT_STATUS_WRITTEN |
				    EXTENT_STATUS_UNWRITTEN));
retry:
	if (err1 && !es1)
		es1 = __es_alloc_extent(true);
	if ((err1 || err2) && !es2)
		es2 = __es_alloc_extent(true);
	if ((err1 || err2 || err3) && revise_pending && !pr)
		pr = __alloc_pending(true);
	write_lock(&EXT4_I(inode)->i_es_lock);

	err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
	if (err1 != 0)
		goto error;
	/* Free preallocated extent if it didn't get used. */
	if (es1) {
		if (!es1->es_len)
			__es_free_extent(es1);
		es1 = NULL;
	}

	err2 = __es_insert_extent(inode, &newes, es2);
	if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
		err2 = 0;
	if (err2 != 0)
		goto error;
	/* Free preallocated extent if it didn't get used. */
	if (es2) {
		if (!es2->es_len)
			__es_free_extent(es2);
		es2 = NULL;
	}

	if (revise_pending) {
		err3 = __revise_pending(inode, lblk, len, &pr);
		if (err3 != 0)
			goto error;
		if (pr) {
			__free_pending(pr);
			pr = NULL;
		}
	}
error:
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err1 || err2 || err3)
		goto retry;

	ext4_es_print_tree(inode);
	return;
}
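
/*
 * Usage sketch (illustrative; the real call sites are in the block
 * mapping paths, e.g. ext4_map_blocks()): after allocating map->m_len
 * blocks at physical block map->m_pblk for logical block map->m_lblk,
 * a caller records the mapping with
 *
 *	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *			      map->m_pblk, EXTENT_STATUS_WRITTEN);
 *
 * Any old status cached for that range is removed first, and the new
 * extent is merged with its neighbours when possible.
 */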

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes, NULL);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}
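
/*
 * Example (a hedged sketch of a readahead-style caller): extents read
 * from disk speculatively can prime the tree without overwriting
 * anything newer:
 *
 *	ext4_es_cache_extent(inode, ee_block, ee_len, ee_start,
 *			     EXTENT_STATUS_WRITTEN);
 *
 * If cached information already overlaps [ee_block, ee_block + ee_len),
 * the call is a no-op; contrast with ext4_es_insert_extent(), which
 * replaces whatever is there.
 */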

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* check the cached extent first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u)\n",
			 lblk, es1->es_lblk, es1->es_len);
		found = 1;
		goto out;
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
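
/*
 * Illustrative fast-path sketch for a caller like ext4_map_blocks()
 * (the caller-side logic is a simplified assumption, not a copy of the
 * real code): hit the status tree before touching the on-disk extent
 * tree, and derive the physical block from the cached extent:
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es) &&
 *	    (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es))) {
 *		map->m_pblk = ext4_es_pblock(&es) +
 *			      map->m_lblk - es.es_lblk;
 *		...no extent tree walk needed...
 *	}
 */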

struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};

/*
 * init_rsvd - initialize reserved count data before removing block range
 *             in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note the first delonly block in the range has not
	 * been found, record the extent containing the block to the left of
	 * the region to be removed, if any, and note that there's no partial
	 * cluster to track
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (!rc->first_do_lblk_found) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current cluster starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
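
/*
 * Worked example with assumed geometry (s_cluster_ratio == 4,
 * s_cluster_bits == 2): counting a delonly extent covering blocks 2-13,
 * which touches clusters 0-3:
 *
 *  - block 2 isn't cluster-aligned, but the extent reaches the end of
 *    cluster 0 (block 3 == EXT4_LBLK_CFILL(sbi, 2)), so cluster 0 is
 *    counted and i advances to block 4;
 *  - blocks 4-11 are two whole clusters (1 and 2), so ndelonly += 2;
 *  - blocks 12-13 only partially fill cluster 3, so it is recorded in
 *    rc->lclu/rc->partial and counted later, once we know no adjacent
 *    extent continues it.
 */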

/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found. If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	if (pr && lclu < pr->lclu)
		return pr;
	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}
	return NULL;
}

/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node, struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any. Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded. They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				__free_pending(pr);
				if (!node)
					break;
				pr = rb_entry(node, struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}
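
/*
 * Worked example of the formula above (assumed numbers): suppose the
 * removed range covered 5 delonly clusters, the clusters at both ends
 * still hold delonly blocks outside the range (so 2 stay reserved), and
 * one pending reservation is found and released in between. Then
 * 5 - 2 - 1 = 2 reservations are returned to the caller and ultimately
 * handed to ext4_da_release_space().
 */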

/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 * @prealloc - pre-allocated es to avoid memory allocation failures
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed. Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes, prealloc);
			if (err) {
				if (!ext4_es_must_keep(&newes))
					return 0;

				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, orig_es.es_lblk + len1,
				   orig_es.es_len - len1 - len2, &orig_es, &rc);
		goto out_get_reserved;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out_get_reserved:
	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}

/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed.
 */
void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			   ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;
	struct extent_status *es = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

retry:
	if (err && !es)
		es = __es_alloc_extent(true);
	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved, es);
	/* Free preallocated extent if it didn't get used. */
	if (es) {
		if (!es->es_len)
			__es_free_extent(es);
		es = NULL;
	}
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err)
		goto retry;

	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
	return;
}
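
/*
 * Usage sketch (illustrative): a truncate or hole punch drops every
 * cached extent from logical block "first" through the maximum file
 * offset with
 *
 *	ext4_es_remove_extent(inode, first, EXT_MAX_BLOCKS - first);
 *
 * releasing any delayed-allocation reservations the removed range held.
 */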
1540 | |
1541 | static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, |
1542 | struct ext4_inode_info *locked_ei) |
1543 | { |
1544 | struct ext4_inode_info *ei; |
1545 | struct ext4_es_stats *es_stats; |
1546 | ktime_t start_time; |
1547 | u64 scan_time; |
1548 | int nr_to_walk; |
1549 | int nr_shrunk = 0; |
1550 | int retried = 0, nr_skipped = 0; |
1551 | |
1552 | es_stats = &sbi->s_es_stats; |
1553 | start_time = ktime_get(); |
1554 | |
1555 | retry: |
1556 | spin_lock(lock: &sbi->s_es_lock); |
1557 | nr_to_walk = sbi->s_es_nr_inode; |
1558 | while (nr_to_walk-- > 0) { |
1559 | if (list_empty(head: &sbi->s_es_list)) { |
1560 | spin_unlock(lock: &sbi->s_es_lock); |
1561 | goto out; |
1562 | } |
1563 | ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info, |
1564 | i_es_list); |
1565 | /* Move the inode to the tail */ |
1566 | list_move_tail(list: &ei->i_es_list, head: &sbi->s_es_list); |
1567 | |
1568 | /* |
1569 | * Normally we try hard to avoid shrinking precached inodes, |
1570 | * but we will as a last resort. |
1571 | */ |
1572 | if (!retried && ext4_test_inode_state(inode: &ei->vfs_inode, |
1573 | bit: EXT4_STATE_EXT_PRECACHED)) { |
1574 | nr_skipped++; |
1575 | continue; |
1576 | } |
1577 | |
1578 | if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) { |
1579 | nr_skipped++; |
1580 | continue; |
1581 | } |
1582 | /* |
1583 | * Now we hold i_es_lock which protects us from inode reclaim |
1584 | * freeing inode under us |
1585 | */ |
1586 | spin_unlock(lock: &sbi->s_es_lock); |
1587 | |
1588 | nr_shrunk += es_reclaim_extents(ei, nr_to_scan: &nr_to_scan); |
1589 | write_unlock(&ei->i_es_lock); |
1590 | |
1591 | if (nr_to_scan <= 0) |
1592 | goto out; |
1593 | spin_lock(lock: &sbi->s_es_lock); |
1594 | } |
1595 | spin_unlock(lock: &sbi->s_es_lock); |
1596 | |
1597 | /* |
1598 | * If we skipped any inodes, and we weren't able to make any |
1599 | * forward progress, try again to scan precached inodes. |
1600 | */ |
1601 | if ((nr_shrunk == 0) && nr_skipped && !retried) { |
1602 | retried++; |
1603 | goto retry; |
1604 | } |
1605 | |
1606 | if (locked_ei && nr_shrunk == 0) |
1607 | nr_shrunk = es_reclaim_extents(ei: locked_ei, nr_to_scan: &nr_to_scan); |
1608 | |
1609 | out: |
1610 | scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); |
1611 | if (likely(es_stats->es_stats_scan_time)) |
1612 | es_stats->es_stats_scan_time = (scan_time + |
1613 | es_stats->es_stats_scan_time*3) / 4; |
1614 | else |
1615 | es_stats->es_stats_scan_time = scan_time; |
1616 | if (scan_time > es_stats->es_stats_max_scan_time) |
1617 | es_stats->es_stats_max_scan_time = scan_time; |
1618 | if (likely(es_stats->es_stats_shrunk)) |
1619 | es_stats->es_stats_shrunk = (nr_shrunk + |
1620 | es_stats->es_stats_shrunk*3) / 4; |
1621 | else |
1622 | es_stats->es_stats_shrunk = nr_shrunk; |
1623 | |
1624 | trace_ext4_es_shrink(sb: sbi->s_sb, nr_shrunk, scan_time, |
1625 | nr_skipped, retried); |
1626 | return nr_shrunk; |
1627 | } |
1628 | |
1629 | static unsigned long ext4_es_count(struct shrinker *shrink, |
1630 | struct shrink_control *sc) |
1631 | { |
1632 | unsigned long nr; |
1633 | struct ext4_sb_info *sbi; |
1634 | |
1635 | sbi = shrink->private_data; |
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
1638 | return nr; |
1639 | } |
1640 | |
1641 | static unsigned long ext4_es_scan(struct shrinker *shrink, |
1642 | struct shrink_control *sc) |
1643 | { |
1644 | struct ext4_sb_info *sbi = shrink->private_data; |
1645 | int nr_to_scan = sc->nr_to_scan; |
1646 | int ret, nr_shrunk; |
1647 | |
	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
1650 | |
1651 | nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL); |
1652 | |
	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
1655 | return nr_shrunk; |
1656 | } |
1657 | |
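/*
 * Back end of the es_shrinker_info seq_file: dump extent status
 * shrinker statistics through procfs.
 */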
1658 | int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v) |
1659 | { |
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
1661 | struct ext4_es_stats *es_stats = &sbi->s_es_stats; |
1662 | struct ext4_inode_info *ei, *max = NULL; |
1663 | unsigned int inode_cnt = 0; |
1664 | |
1665 | if (v != SEQ_START_TOKEN) |
1666 | return 0; |
1667 | |
	/* just find the inode with the maximum number of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (!max || ei->i_es_all_nr > max->i_es_all_nr)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);
1678 | |
	seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, " %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, " %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			   "maximum:\n %lu inode (%u objects, %u reclaimable)\n"
			   " %llu us max scan time\n",
			   max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
			   div_u64(es_stats->es_stats_max_scan_time, 1000));
1697 | |
1698 | return 0; |
1699 | } |
1700 | |
1701 | int ext4_es_register_shrinker(struct ext4_sb_info *sbi) |
1702 | { |
1703 | int err; |
1704 | |
1705 | /* Make sure we have enough bits for physical block number */ |
1706 | BUILD_BUG_ON(ES_SHIFT < 48); |
	INIT_LIST_HEAD(&sbi->s_es_list);
1708 | sbi->s_es_nr_inode = 0; |
1709 | spin_lock_init(&sbi->s_es_lock); |
1710 | sbi->s_es_stats.es_stats_shrunk = 0; |
1711 | err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0, |
1712 | GFP_KERNEL); |
1713 | if (err) |
1714 | return err; |
1715 | err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0, |
1716 | GFP_KERNEL); |
1717 | if (err) |
1718 | goto err1; |
1719 | sbi->s_es_stats.es_stats_scan_time = 0; |
1720 | sbi->s_es_stats.es_stats_max_scan_time = 0; |
1721 | err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL); |
1722 | if (err) |
1723 | goto err2; |
1724 | err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL); |
1725 | if (err) |
1726 | goto err3; |
1727 | |
	sbi->s_es_shrinker = shrinker_alloc(0, "ext4-es:%s", sbi->s_sb->s_id);
1729 | if (!sbi->s_es_shrinker) { |
1730 | err = -ENOMEM; |
1731 | goto err4; |
1732 | } |
1733 | |
1734 | sbi->s_es_shrinker->scan_objects = ext4_es_scan; |
1735 | sbi->s_es_shrinker->count_objects = ext4_es_count; |
1736 | sbi->s_es_shrinker->private_data = sbi; |
1737 | |
	shrinker_register(sbi->s_es_shrinker);
1739 | |
1740 | return 0; |
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
1749 | return err; |
1750 | } |
1751 | |
1752 | void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) |
1753 | { |
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	shrinker_free(sbi->s_es_shrinker);
1759 | } |
1760 | |
1761 | /* |
 * Shrink extents in the given inode from ei->i_es_shrink_lblk till end. Scan
 * at most *nr_to_scan extents, update *nr_to_scan accordingly.
1764 | * |
1765 | * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan. |
1766 | * Increment *nr_shrunk by the number of reclaimed extents. Also update |
1767 | * ei->i_es_shrink_lblk to where we should continue scanning. |
1768 | */ |
1769 | static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end, |
1770 | int *nr_to_scan, int *nr_shrunk) |
1771 | { |
1772 | struct inode *inode = &ei->vfs_inode; |
1773 | struct ext4_es_tree *tree = &ei->i_es_tree; |
1774 | struct extent_status *es; |
1775 | struct rb_node *node; |
1776 | |
	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
1778 | if (!es) |
1779 | goto out_wrap; |
1780 | |
1781 | while (*nr_to_scan > 0) { |
1782 | if (es->es_lblk > end) { |
1783 | ei->i_es_shrink_lblk = end + 1; |
1784 | return 0; |
1785 | } |
1786 | |
1787 | (*nr_to_scan)--; |
1788 | node = rb_next(&es->rb_node); |
1789 | |
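		/*
		 * Entries that must be kept (e.g. delayed extents) are
		 * never reclaimed; referenced entries get a second
		 * chance, losing only their referenced bit, so recently
		 * used entries survive one scan pass.
		 */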
1790 | if (ext4_es_must_keep(es)) |
1791 | goto next; |
1792 | if (ext4_es_is_referenced(es)) { |
1793 | ext4_es_clear_referenced(es); |
1794 | goto next; |
1795 | } |
1796 | |
1797 | rb_erase(&es->rb_node, &tree->root); |
1798 | ext4_es_free_extent(inode, es); |
1799 | (*nr_shrunk)++; |
1800 | next: |
1801 | if (!node) |
1802 | goto out_wrap; |
1803 | es = rb_entry(node, struct extent_status, rb_node); |
1804 | } |
1805 | ei->i_es_shrink_lblk = es->es_lblk; |
1806 | return 1; |
1807 | out_wrap: |
1808 | ei->i_es_shrink_lblk = 0; |
1809 | return 0; |
1810 | } |
1811 | |
1812 | static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan) |
1813 | { |
1814 | struct inode *inode = &ei->vfs_inode; |
1815 | int nr_shrunk = 0; |
1816 | ext4_lblk_t start = ei->i_es_shrink_lblk; |
1817 | static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, |
1818 | DEFAULT_RATELIMIT_BURST); |
1819 | |
1820 | if (ei->i_es_shk_nr == 0) |
1821 | return 0; |
1822 | |
	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");
1826 | |
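	/*
	 * Scan forward from the saved cursor to the end of the tree; if
	 * that pass hits the end without exhausting nr_to_scan, wrap
	 * around and scan the range [0, start - 1] that was skipped.
	 */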
	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);
1830 | |
1831 | ei->i_es_tree.cache_es = NULL; |
1832 | return nr_shrunk; |
1833 | } |
1834 | |
1835 | /* |
1836 | * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove |
1837 | * discretionary entries from the extent status cache. (Some entries |
 * must be present for proper operation.)
1839 | */ |
1840 | void ext4_clear_inode_es(struct inode *inode) |
1841 | { |
1842 | struct ext4_inode_info *ei = EXT4_I(inode); |
1843 | struct extent_status *es; |
1844 | struct ext4_es_tree *tree; |
1845 | struct rb_node *node; |
1846 | |
1847 | write_lock(&ei->i_es_lock); |
1848 | tree = &EXT4_I(inode)->i_es_tree; |
1849 | tree->cache_es = NULL; |
1850 | node = rb_first(&tree->root); |
1851 | while (node) { |
1852 | es = rb_entry(node, struct extent_status, rb_node); |
1853 | node = rb_next(node); |
1854 | if (!ext4_es_must_keep(es)) { |
1855 | rb_erase(&es->rb_node, &tree->root); |
1856 | ext4_es_free_extent(inode, es); |
1857 | } |
1858 | } |
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
1860 | write_unlock(&ei->i_es_lock); |
1861 | } |
1862 | |
1863 | #ifdef ES_DEBUG__ |
1864 | static void ext4_print_pending_tree(struct inode *inode) |
1865 | { |
1866 | struct ext4_pending_tree *tree; |
1867 | struct rb_node *node; |
1868 | struct pending_reservation *pr; |
1869 | |
1870 | printk(KERN_DEBUG "pending reservations for inode %lu:" , inode->i_ino); |
1871 | tree = &EXT4_I(inode)->i_pending_tree; |
1872 | node = rb_first(&tree->root); |
1873 | while (node) { |
1874 | pr = rb_entry(node, struct pending_reservation, rb_node); |
1875 | printk(KERN_DEBUG " %u" , pr->lclu); |
1876 | node = rb_next(node); |
1877 | } |
1878 | printk(KERN_DEBUG "\n" ); |
1879 | } |
1880 | #else |
1881 | #define ext4_print_pending_tree(inode) |
1882 | #endif |
1883 | |
1884 | int __init ext4_init_pending(void) |
1885 | { |
1886 | ext4_pending_cachep = KMEM_CACHE(pending_reservation, SLAB_RECLAIM_ACCOUNT); |
1887 | if (ext4_pending_cachep == NULL) |
1888 | return -ENOMEM; |
1889 | return 0; |
1890 | } |
1891 | |
1892 | void ext4_exit_pending(void) |
1893 | { |
	kmem_cache_destroy(ext4_pending_cachep);
1895 | } |
1896 | |
1897 | void ext4_init_pending_tree(struct ext4_pending_tree *tree) |
1898 | { |
1899 | tree->root = RB_ROOT; |
1900 | } |
1901 | |
1902 | /* |
1903 | * __get_pending - retrieve a pointer to a pending reservation |
1904 | * |
1905 | * @inode - file containing the pending cluster reservation |
1906 | * @lclu - logical cluster of interest |
1907 | * |
1908 | * Returns a pointer to a pending reservation if it's a member of |
1909 | * the set, and NULL if not. Must be called holding i_es_lock. |
1910 | */ |
1911 | static struct pending_reservation *__get_pending(struct inode *inode, |
1912 | ext4_lblk_t lclu) |
1913 | { |
1914 | struct ext4_pending_tree *tree; |
1915 | struct rb_node *node; |
1916 | struct pending_reservation *pr = NULL; |
1917 | |
1918 | tree = &EXT4_I(inode)->i_pending_tree; |
1919 | node = (&tree->root)->rb_node; |
1920 | |
1921 | while (node) { |
1922 | pr = rb_entry(node, struct pending_reservation, rb_node); |
1923 | if (lclu < pr->lclu) |
1924 | node = node->rb_left; |
1925 | else if (lclu > pr->lclu) |
1926 | node = node->rb_right; |
1927 | else if (lclu == pr->lclu) |
1928 | return pr; |
1929 | } |
1930 | return NULL; |
1931 | } |
1932 | |
1933 | /* |
1934 | * __insert_pending - adds a pending cluster reservation to the set of |
1935 | * pending reservations |
1936 | * |
1937 | * @inode - file containing the cluster |
1938 | * @lblk - logical block in the cluster to be added |
1939 | * @prealloc - preallocated pending entry |
1940 | * |
1941 | * Returns 0 on successful insertion and -ENOMEM on failure. If the |
1942 | * pending reservation is already in the set, returns successfully. |
1943 | */ |
1944 | static int __insert_pending(struct inode *inode, ext4_lblk_t lblk, |
1945 | struct pending_reservation **prealloc) |
1946 | { |
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1948 | struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree; |
1949 | struct rb_node **p = &tree->root.rb_node; |
1950 | struct rb_node *parent = NULL; |
1951 | struct pending_reservation *pr; |
1952 | ext4_lblk_t lclu; |
1953 | int ret = 0; |
1954 | |
1955 | lclu = EXT4_B2C(sbi, lblk); |
1956 | /* search to find parent for insertion */ |
1957 | while (*p) { |
1958 | parent = *p; |
1959 | pr = rb_entry(parent, struct pending_reservation, rb_node); |
1960 | |
1961 | if (lclu < pr->lclu) { |
1962 | p = &(*p)->rb_left; |
1963 | } else if (lclu > pr->lclu) { |
1964 | p = &(*p)->rb_right; |
1965 | } else { |
1966 | /* pending reservation already inserted */ |
1967 | goto out; |
1968 | } |
1969 | } |
1970 | |
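	/*
	 * Use the caller's preallocated entry if one was passed in;
	 * otherwise try a regular allocation, which may fail and let
	 * the caller retry with a nofail preallocation made outside
	 * i_es_lock.
	 */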
1971 | if (likely(*prealloc == NULL)) { |
		pr = __alloc_pending(false);
1973 | if (!pr) { |
1974 | ret = -ENOMEM; |
1975 | goto out; |
1976 | } |
1977 | } else { |
1978 | pr = *prealloc; |
1979 | *prealloc = NULL; |
1980 | } |
1981 | pr->lclu = lclu; |
1982 | |
	rb_link_node(&pr->rb_node, parent, p);
1984 | rb_insert_color(&pr->rb_node, &tree->root); |
1985 | |
1986 | out: |
1987 | return ret; |
1988 | } |
1989 | |
1990 | /* |
1991 | * __remove_pending - removes a pending cluster reservation from the set |
1992 | * of pending reservations |
1993 | * |
1994 | * @inode - file containing the cluster |
1995 | * @lblk - logical block in the pending cluster reservation to be removed |
1996 | * |
 * It is not an error if no pending reservation exists for the cluster.
1998 | */ |
1999 | static void __remove_pending(struct inode *inode, ext4_lblk_t lblk) |
2000 | { |
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2002 | struct pending_reservation *pr; |
2003 | struct ext4_pending_tree *tree; |
2004 | |
2005 | pr = __get_pending(inode, EXT4_B2C(sbi, lblk)); |
2006 | if (pr != NULL) { |
2007 | tree = &EXT4_I(inode)->i_pending_tree; |
2008 | rb_erase(&pr->rb_node, &tree->root); |
2009 | __free_pending(pr); |
2010 | } |
2011 | } |
2012 | |
2013 | /* |
2014 | * ext4_remove_pending - removes a pending cluster reservation from the set |
2015 | * of pending reservations |
2016 | * |
2017 | * @inode - file containing the cluster |
2018 | * @lblk - logical block in the pending cluster reservation to be removed |
2019 | * |
2020 | * Locking for external use of __remove_pending. |
2021 | */ |
2022 | void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk) |
2023 | { |
2024 | struct ext4_inode_info *ei = EXT4_I(inode); |
2025 | |
2026 | write_lock(&ei->i_es_lock); |
2027 | __remove_pending(inode, lblk); |
2028 | write_unlock(&ei->i_es_lock); |
2029 | } |
2030 | |
2031 | /* |
2032 | * ext4_is_pending - determine whether a cluster has a pending reservation |
2033 | * on it |
2034 | * |
2035 | * @inode - file containing the cluster |
2036 | * @lblk - logical block in the cluster |
2037 | * |
2038 | * Returns true if there's a pending reservation for the cluster in the |
2039 | * set of pending reservations, and false if not. |
2040 | */ |
2041 | bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk) |
2042 | { |
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2044 | struct ext4_inode_info *ei = EXT4_I(inode); |
2045 | bool ret; |
2046 | |
2047 | read_lock(&ei->i_es_lock); |
2048 | ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL); |
2049 | read_unlock(&ei->i_es_lock); |
2050 | |
2051 | return ret; |
2052 | } |
2053 | |
2054 | /* |
2055 | * ext4_es_insert_delayed_block - adds a delayed block to the extents status |
2056 | * tree, adding a pending reservation where |
2057 | * needed |
2058 | * |
2059 | * @inode - file containing the newly added block |
2060 | * @lblk - logical block to be added |
2061 | * @allocated - indicates whether a physical cluster has been allocated for |
2062 | * the logical cluster that contains the block |
2063 | */ |
2064 | void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk, |
2065 | bool allocated) |
2066 | { |
2067 | struct extent_status newes; |
2068 | int err1 = 0, err2 = 0, err3 = 0; |
2069 | struct extent_status *es1 = NULL; |
2070 | struct extent_status *es2 = NULL; |
2071 | struct pending_reservation *pr = NULL; |
2072 | |
	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
2074 | return; |
2075 | |
2076 | es_debug("add [%u/1) delayed to extent status tree of inode %lu\n" , |
2077 | lblk, inode->i_ino); |
2078 | |
2079 | newes.es_lblk = lblk; |
2080 | newes.es_len = 1; |
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);
2085 | |
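	/*
	 * On the first pass err1/err2/err3 are all zero, so nothing is
	 * preallocated and the calls below may fail with -ENOMEM.  On a
	 * retry, preallocate (nofail) the objects for the step that
	 * failed and every step after it, then redo the whole sequence
	 * under i_es_lock.
	 */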
2086 | retry: |
2087 | if (err1 && !es1) |
2088 | es1 = __es_alloc_extent(nofail: true); |
2089 | if ((err1 || err2) && !es2) |
2090 | es2 = __es_alloc_extent(nofail: true); |
2091 | if ((err1 || err2 || err3) && allocated && !pr) |
2092 | pr = __alloc_pending(nofail: true); |
2093 | write_lock(&EXT4_I(inode)->i_es_lock); |
2094 | |
2095 | err1 = __es_remove_extent(inode, lblk, end: lblk, NULL, prealloc: es1); |
2096 | if (err1 != 0) |
2097 | goto error; |
2098 | /* Free preallocated extent if it didn't get used. */ |
2099 | if (es1) { |
2100 | if (!es1->es_len) |
			__es_free_extent(es1);
2102 | es1 = NULL; |
2103 | } |
2104 | |
	err2 = __es_insert_extent(inode, &newes, es2);
2106 | if (err2 != 0) |
2107 | goto error; |
2108 | /* Free preallocated extent if it didn't get used. */ |
2109 | if (es2) { |
2110 | if (!es2->es_len) |
			__es_free_extent(es2);
2112 | es2 = NULL; |
2113 | } |
2114 | |
2115 | if (allocated) { |
		err3 = __insert_pending(inode, lblk, &pr);
2117 | if (err3 != 0) |
2118 | goto error; |
2119 | if (pr) { |
2120 | __free_pending(pr); |
2121 | pr = NULL; |
2122 | } |
2123 | } |
2124 | error: |
2125 | write_unlock(&EXT4_I(inode)->i_es_lock); |
2126 | if (err1 || err2 || err3) |
2127 | goto retry; |
2128 | |
2129 | ext4_es_print_tree(inode); |
2130 | ext4_print_pending_tree(inode); |
2131 | return; |
2132 | } |
2133 | |
2134 | /* |
2135 | * __es_delayed_clu - count number of clusters containing blocks that |
2136 | * are delayed only |
2137 | * |
2138 | * @inode - file containing block range |
2139 | * @start - logical block defining start of range |
2140 | * @end - logical block defining end of range |
2141 | * |
2142 | * Returns the number of clusters containing only delayed (not delayed |
2143 | * and unwritten) blocks in the range specified by @start and @end. Any |
2144 | * cluster or part of a cluster within the range and containing a delayed |
2145 | * and not unwritten block within the range is counted as a whole cluster. |
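 *
 * For example, with a cluster ratio of 4, delayed-only extents covering
 * blocks 2-5 and 6-9 touch clusters 0, 1, and 2, so a query over blocks
 * 0-11 returns 3 (cluster 1 is shared by both extents but counted only
 * once).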
2146 | */ |
2147 | static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start, |
2148 | ext4_lblk_t end) |
2149 | { |
2150 | struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; |
2151 | struct extent_status *es; |
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2153 | struct rb_node *node; |
2154 | ext4_lblk_t first_lclu, last_lclu; |
2155 | unsigned long long last_counted_lclu; |
2156 | unsigned int n = 0; |
2157 | |
2158 | /* guaranteed to be unequal to any ext4_lblk_t value */ |
2159 | last_counted_lclu = ~0ULL; |
2160 | |
	es = __es_tree_search(&tree->root, start);
2162 | |
2163 | while (es && (es->es_lblk <= end)) { |
2164 | if (ext4_es_is_delonly(es)) { |
2165 | if (es->es_lblk <= start) |
2166 | first_lclu = EXT4_B2C(sbi, start); |
2167 | else |
2168 | first_lclu = EXT4_B2C(sbi, es->es_lblk); |
2169 | |
2170 | if (ext4_es_end(es) >= end) |
2171 | last_lclu = EXT4_B2C(sbi, end); |
2172 | else |
2173 | last_lclu = EXT4_B2C(sbi, ext4_es_end(es)); |
2174 | |
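			/*
			 * If this extent starts in the cluster already
			 * counted for the previous extent, avoid counting
			 * that cluster twice.
			 */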
2175 | if (first_lclu == last_counted_lclu) |
2176 | n += last_lclu - first_lclu; |
2177 | else |
2178 | n += last_lclu - first_lclu + 1; |
2179 | last_counted_lclu = last_lclu; |
2180 | } |
2181 | node = rb_next(&es->rb_node); |
2182 | if (!node) |
2183 | break; |
2184 | es = rb_entry(node, struct extent_status, rb_node); |
2185 | } |
2186 | |
2187 | return n; |
2188 | } |
2189 | |
2190 | /* |
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed only
2193 | * |
2194 | * @inode - file containing block range |
2195 | * @lblk - logical block defining start of range |
2196 | * @len - number of blocks in range |
2197 | * |
2198 | * Locking for external use of __es_delayed_clu(). |
2199 | */ |
2200 | unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk, |
2201 | ext4_lblk_t len) |
2202 | { |
2203 | struct ext4_inode_info *ei = EXT4_I(inode); |
2204 | ext4_lblk_t end; |
2205 | unsigned int n; |
2206 | |
2207 | if (len == 0) |
2208 | return 0; |
2209 | |
2210 | end = lblk + len - 1; |
2211 | WARN_ON(end < lblk); |
2212 | |
2213 | read_lock(&ei->i_es_lock); |
2214 | |
	n = __es_delayed_clu(inode, lblk, end);
2216 | |
2217 | read_unlock(&ei->i_es_lock); |
2218 | |
2219 | return n; |
2220 | } |
2221 | |
2222 | /* |
2223 | * __revise_pending - makes, cancels, or leaves unchanged pending cluster |
2224 | * reservations for a specified block range depending |
2225 | * upon the presence or absence of delayed blocks |
2226 | * outside the range within clusters at the ends of the |
2227 | * range |
2228 | * |
2229 | * @inode - file containing the range |
2230 | * @lblk - logical block defining the start of range |
2231 | * @len - length of range in blocks |
2232 | * @prealloc - preallocated pending entry |
2233 | * |
2234 | * Used after a newly allocated extent is added to the extents status tree. |
2235 | * Requires that the extents in the range have either written or unwritten |
2236 | * status. Must be called while holding i_es_lock. |
2237 | */ |
2238 | static int __revise_pending(struct inode *inode, ext4_lblk_t lblk, |
2239 | ext4_lblk_t len, |
2240 | struct pending_reservation **prealloc) |
2241 | { |
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2243 | ext4_lblk_t end = lblk + len - 1; |
2244 | ext4_lblk_t first, last; |
2245 | bool f_del = false, l_del = false; |
2246 | int ret = 0; |
2247 | |
2248 | if (len == 0) |
2249 | return 0; |
2250 | |
2251 | /* |
2252 | * Two cases - block range within single cluster and block range |
2253 | * spanning two or more clusters. Note that a cluster belonging |
2254 | * to a range starting and/or ending on a cluster boundary is treated |
2255 | * as if it does not contain a delayed extent. The new range may |
2256 | * have allocated space for previously delayed blocks out to the |
2257 | * cluster boundary, requiring that any pre-existing pending |
2258 | * reservation be canceled. Because this code only looks at blocks |
2259 | * outside the range, it should revise pending reservations |
2260 | * correctly even if the extent represented by the range can't be |
2261 | * inserted in the extents status tree due to ENOSPC. |
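 *
 * For example, with a cluster ratio of 4, suppose blocks 5-6 were just
 * allocated (both within the cluster covering blocks 4-7).  If block 4
 * is still delayed-only, a pending reservation is made for the cluster;
 * otherwise block 7 is checked the same way, and if it is not
 * delayed-only either, any pending reservation on the cluster is
 * removed.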
2262 | */ |
2263 | |
	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			ret = __insert_pending(inode, first, prealloc);
			if (ret < 0)
				goto out;
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del) {
				ret = __insert_pending(inode, last, prealloc);
				if (ret < 0)
					goto out;
			} else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			ret = __insert_pending(inode, first, prealloc);
			if (ret < 0)
				goto out;
		} else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del) {
			ret = __insert_pending(inode, last, prealloc);
			if (ret < 0)
				goto out;
		} else
			__remove_pending(inode, last);
	}
2310 | out: |
2311 | return ret; |
2312 | } |
2313 | |