// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/indirect.c
 *
 * from
 *
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
/**
 * ext4_block_to_path - parse the block number into an array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext4 uses a data structure
 * common to UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes. This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the offset
 * of the pointer to the (n+1)-th node in the n-th one. If @i_block is out
 * of range (negative or too large), a warning is printed and zero is
 * returned.
 *
 * Note: the function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
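
/*
 * Editor's illustration (not from the original source): a worked example
 * of the mapping above, assuming a 4KiB block size, i.e. ptrs == 1024,
 * ptrs_bits == 10 and EXT4_NDIR_BLOCKS == 12:
 *
 *	i_block = 5       -> offsets = { 5 },                        n = 1
 *	i_block = 12      -> offsets = { EXT4_IND_BLOCK, 0 },        n = 2
 *	i_block = 1036    -> offsets = { EXT4_DIND_BLOCK, 0, 0 },    n = 3
 *	i_block = 1049612 -> offsets = { EXT4_TIND_BLOCK, 0, 0, 0 }, n = 4
 */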

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon a zero pointer (absent block)
 * (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 * (ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *@err == 0).
 *
 * Must be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	unsigned int key;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		key = le32_to_cpu(p->key);
		if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) {
			/* the block was out of range */
			ret = -EFSCORRUPTED;
			goto failure;
		}
		bh = sb_getblk(sb, key);
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (ext4_read_bh(bh, 0, NULL) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
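
/*
 * Editor's illustration (not from the original source): for a successful
 * depth-2 lookup the resulting chain looks like this:
 *
 *	chain[0].p   -> slot in EXT4_I(inode)->i_data, chain[0].bh == NULL
 *	chain[0].key -> little-endian number of the indirect block
 *	chain[1].p   -> slot in chain[1].bh->b_data (the indirect block)
 *	chain[1].key -> little-endian number of the data block
 */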

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 * + if there is a block to the left of our position - allocate near it.
 * + if pointer will live in indirect block - allocate near that block.
 * + if pointer will live in inode - allocate in the same
 *   cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group. The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly the blocks on that path have not been allocated
	 * either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
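
/*
 * Editor's illustration (not from the original source, assumed values):
 * with blocks_to_boundary == 6 and blks == 10, a branch whose indirect
 * block is missing (k > 0) yields count == 7 - we stop at the boundary
 * rather than allocate across it. With k == 0 the loop instead counts
 * data blocks until it hits blks, the boundary, or an already-mapped
 * pointer in branch[0].
 */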

/**
 * ext4_alloc_branch() - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of allocated indirect blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else {
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
			/* Simplify error cleanup... */
			branch[i+1].bh = NULL;
		}
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, ar->inode->i_sb,
						     bh, EXT4_JTR_NONE);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	if (i == indirect_blks) {
		/* Free data blocks */
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 ar->len, 0);
		i--;
	}
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks. Buffer for new_blocks[i] is at branch[i+1].bh
		 * (buffer at branch[0].bh is indirect block / inode already
		 * existing before ext4_alloc_branch() was called). Also
		 * because blocks are freshly allocated, we don't need to
		 * revoke them which is why we don't set
		 * EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
				 new_blocks[i], 1,
				 branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
	}
	return err;
}
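
/*
 * Editor's illustration (not from the original source, assumed values):
 * after a successful call with indirect_blks == 2 and ar->len == 3,
 * new_blocks[0..1] are fresh indirect blocks and new_blocks[2] is the
 * first of three consecutive data blocks. branch[0].key names the first
 * indirect block, branch[1].bh and branch[2].bh host the zeroed indirect
 * blocks, and the deepest one carries the three data block numbers. Only
 * *branch[0].p is still unset; ext4_splice_branch() fills it in.
 */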

/**
 * ext4_splice_branch() - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, ar->inode->i_sb,
						    where->bh, EXT4_JTR_NONE);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}
458 | |
459 | /* We are done with atomic stuff, now do the rest of housekeeping */ |
460 | /* had we spliced it onto indirect block? */ |
461 | if (where->bh) { |
462 | /* |
463 | * If we spliced it onto an indirect block, we haven't |
464 | * altered the inode. Note however that if it is being spliced |
465 | * onto an indirect block at the very end of the file (the |
466 | * file is growing) then we *will* alter the inode to reflect |
467 | * the new i_size. But that is not done here - it is done in |
468 | * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. |
469 | */ |
470 | ext4_debug("splicing indirect only\n" ); |
471 | BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata" ); |
472 | err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh); |
473 | if (err) |
474 | goto err_out; |
475 | } else { |
476 | /* |
477 | * OK, we spliced it into the inode itself on a direct block. |
478 | */ |
479 | err = ext4_mark_inode_dirty(handle, ar->inode); |
480 | if (unlikely(err)) |
481 | goto err_out; |
482 | ext4_debug("splicing direct\n" ); |
483 | } |
484 | return err; |
485 | |
486 | err_out: |
487 | for (i = 1; i <= num; i++) { |
488 | /* |
489 | * branch[i].bh is newly allocated, so there is no |
490 | * need to revoke the block, which is why we don't |
491 | * need to set EXT4_FREE_BLOCKS_METADATA. |
492 | */ |
493 | ext4_free_blocks(handle, inode: ar->inode, bh: where[i].bh, block: 0, count: 1, |
494 | EXT4_FREE_BLOCKS_FORGET); |
495 | } |
496 | ext4_free_blocks(handle, inode: ar->inode, NULL, le32_to_cpu(where[num].key), |
497 | count: ar->len, flags: 0); |
498 | |
499 | return err; |
500 | } |
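
/*
 * Editor's illustration (not from the original source): for a splice
 * directly into the inode (num == 0) of three blocks (ar->len == 3),
 * the missing link and the following slots end up as:
 *
 *	*where->p       = where->key;                // first data block
 *	*(where->p + 1) = cpu_to_le32(first + 1);
 *	*(where->p + 2) = cpu_to_le32(first + 2);
 *
 * with first == le32_to_cpu(where->key).
 */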

/*
 * The ext4_ind_map_blocks() function handles non-extent inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks in the subtree under 'partial'.
		 * At each level we count the number of complete empty
		 * subtrees beyond the current offset and then descend into
		 * the subtree only partially beyond the current offset.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
		/* Fill in size of a hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}
594 | |
595 | /* Failed read of indirect block */ |
596 | if (err == -EIO) |
597 | goto cleanup; |
598 | |
599 | /* |
600 | * Okay, we need to do block allocation. |
601 | */ |
602 | if (ext4_has_feature_bigalloc(sb: inode->i_sb)) { |
603 | EXT4_ERROR_INODE(inode, "Can't allocate blocks for " |
604 | "non-extent mapped inodes with bigalloc" ); |
605 | err = -EFSCORRUPTED; |
606 | goto out; |
607 | } |
608 | |
609 | /* Set up for the direct block allocation */ |
610 | memset(&ar, 0, sizeof(ar)); |
611 | ar.inode = inode; |
612 | ar.logical = map->m_lblk; |
613 | if (S_ISREG(inode->i_mode)) |
614 | ar.flags = EXT4_MB_HINT_DATA; |
615 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
616 | ar.flags |= EXT4_MB_DELALLOC_RESERVED; |
617 | if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) |
618 | ar.flags |= EXT4_MB_USE_RESERVED; |
619 | |
620 | ar.goal = ext4_find_goal(inode, block: map->m_lblk, partial); |
621 | |
622 | /* the number of blocks need to allocate for [d,t]indirect blocks */ |
623 | indirect_blks = (chain + depth) - partial - 1; |
624 | |
625 | /* |
626 | * Next look up the indirect map to count the totoal number of |
627 | * direct blocks to allocate for this branch. |
628 | */ |
629 | ar.len = ext4_blks_to_allocate(branch: partial, k: indirect_blks, |
630 | blks: map->m_len, blocks_to_boundary); |

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned. Can we handle this somehow? We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;

	/*
	 * Update reserved blocks/metadata blocks after successful block
	 * allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, count, 1);

got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
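
/*
 * Editor's illustration (not from the original source): a minimal
 * read-only lookup sketch under the documented locking rules - no
 * handle, no EXT4_GET_BLOCKS_CREATE, i_data_sem held shared:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret;
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	ret = ext4_ind_map_blocks(NULL, inode, &map, 0);
 *	up_read(&EXT4_I(inode)->i_data_sem);
 *	// ret > 0: map.m_pblk .. map.m_pblk + ret - 1 are mapped
 *	// ret == 0: hole; map.m_len is clamped to the hole size
 */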

/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks.
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
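
/*
 * Editor's illustration (not from the original source, assuming 4KiB
 * blocks, so EXT4_ADDR_PER_BLOCK == 1024): mapping nrblocks == 2048
 * contiguous blocks gives DIV_ROUND_UP(2048, 1024) + 4 == 6 metadata
 * blocks in the worst case, per the bound in the comment above.
 */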

static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh, int *dropped)
{
	int err;

	if (bh) {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			return err;
	}
	err = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err))
		return err;
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation. If the
 * extend fails, we restart the transaction.
 */
static int ext4_ind_truncate_ensure_credits(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh,
					    int revoke_creds)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_blocks_for_truncate(inode), revoke_creds,
			ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	if (ret <= 0)
		return ret;
	if (bh) {
		BUFFER_TRACE(bh, "retaking write access");
		ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
						    EXT4_JTR_NONE);
		if (unlikely(ret))
			return ret;
	}
	return 0;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to the (detached) top of branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but the top of branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. Return value is the pointer to last filled element
 * of @chain.
 *
 * The actual freeing of the subtrees is left to the caller:
 * a) free the subtree starting from *@top
 * b) free the subtrees whose roots are stored in
 *	(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 * c) free the subtrees growing from the inode past the @chain[0]
 *	(no partially truncated stuff there).
 */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
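
/*
 * Editor's illustration (not from the original source, assumed numbers):
 * truncating so that the first removed block lives at offsets
 * { EXT4_DIND_BLOCK, 3, 5 }: chain[1] then points at slot 3 of the
 * double-indirect block and chain[2] at slot 5 of the indirect block
 * below it. The caller frees the subtree rooted at *@top (if any) and
 * everything to the right of chain[1].p and chain[2].p, while the
 * blocks hosting chain[1] and chain[2] themselves survive.
 */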

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_inode_block_valid(inode, block_to_free, count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
				ext4_free_data_revoke_credits(inode, count));
	if (err < 0)
		goto out_err;

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* Starting block # of a run */
	unsigned long count = 0;		/* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* Pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* Current block # */
	__le32 *p;				/* Pointer into inode/ind
						   for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    this_bh, EXT4_JTR_NONE);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
					 (unsigned long long) this_bh->b_blocknr);
	}
}

/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_inode_block_valid(inode, nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = ext4_sb_bread(inode->i_sb, nr, 0);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (IS_ERR(bh)) {
				ext4_error_inode_block(inode, nr, -PTR_ERR(bh),
						       "Read failure");
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (ext4_ind_truncate_ensure_credits(handle, inode,
					NULL,
					ext4_free_metadata_revoke_credits(
						inode->i_sb, 1)) < 0)
				return;

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block. This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
						inode->i_sb, parent_bh,
						EXT4_JTR_NONE)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
}

/**
 * ext4_ind_remove_space - remove space from the range
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @start: First block to remove
 * @end: One block after the last block to remove (exclusive)
 *
 * Free the blocks in the defined range (end is exclusive endpoint of
 * range). This is used by ext4_punch_hole().
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are on different levels, so we're going to
		 * free a partial block at the start and a partial block at
		 * the end of the range. If there are some levels in between,
		 * the do_indirects label will take care of those.
		 */

		if (n == 1) {
			/*
			 * Start is at the direct block level, free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}


		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free. Everything was covered by the start
				 * of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element that should not
			 * be removed by truncate. But this is the end of the
			 * range in punch_hole, so we need to point to the
			 * next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element that should not be removed
		 * by truncate. But this is the end of the range in
		 * punch_hole, so we need to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block. Clear the range,
			 * then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level. So, we
		 * give them a chance to arrive at the same level, then walk
		 * them in step with each other until we converge on the same
		 * block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}