// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

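/*
 * Split a file block number into an array of offsets: the slot in the
 * inode's block array, followed by one index per level of indirection.
 * Returns the depth of the resulting path, or 0 if the block is out of
 * range.
 *
 * A worked example with illustrative numbers (UFS_NDADDR == 12 direct
 * slots; assume, for illustration, s_apb == 2048 pointers per indirect
 * block): file block 11 maps to {11} (depth 1), block 12 maps to
 * {UFS_IND_BLOCK, 0} (depth 2), and block 12 + 2048 maps to
 * {UFS_DIND_BLOCK, 0, 0} (depth 3).
 */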
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;
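
/*
 * grow_chain32()/grow_chain64() extend the chain of Indirect entries by
 * one, sampling the pointer at *v under ufsi->meta_lock (a seqlock) and
 * re-checking that every key cached so far still matches what is on
 * disk.  A zero return means a concurrent truncate changed the chain
 * under us and the caller must restart its lookup.
 */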
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64)uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;
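
	/*
	 * Indirect blocks are addressed in fragment units: the high bits
	 * of an index (above "shift") select which fragment of the
	 * indirect block to read, while "mask" keeps the pointer's
	 * position within that fragment.
	 */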
	UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
			      fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
			      fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/*
 * Unpacking tails: we have a file with partial final block and
 * we had been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
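/*
 * For example (illustrative numbers only, assuming uspi->s_fpb == 8):
 * with i_lastfrag == 21 the tail occupies 5 fragments of its block
 * (21 & 7), so a write to fragment 22 grows it to 7 fragments, while a
 * write at or past fragment 23 rounds it up to the full 8.
 */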
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_page);
	return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of new allocated fragment(s)
 * @err: set to the error code if something goes wrong
 * @new: set to 1 if a new block is allocated
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
	     goto ufs2;
	 */

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
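
	/*
	 * Goal-directed allocation: if the previous direct block is
	 * already allocated, try to place the new fragments right
	 * after it.
	 */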
	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_page);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode(inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	 */
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		   unsigned index, sector_t new_fragment, int *err,
		   int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}
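
	/*
	 * sb_bread() above read the fragment of the indirect block that
	 * holds our pointer (index >> shift); the remaining low bits of
	 * the index select the pointer within that fragment.
	 */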
	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
out:
	brelse(bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * read_folio, writepages and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;
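
	/*
	 * "frag" is the fragment's offset within its block; it is added
	 * back to the block's address once the block has been mapped.
	 */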
	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;

	if (phys64) {
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
	/* This code is entered only while writing ...? */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_page);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						    fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					    fragment, &err, &new, bh_result->b_page);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}

static int ufs_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ufs_getfrag_block);
}

static int ufs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = ufs_read_folio,
	.writepages = ufs_writepages,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.migrate_folio = buffer_migrate_folio,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode_set_atime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
			0);
	inode_set_ctime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
			0);
	inode_set_mtime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
			0);
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
			fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
	inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
			fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
	inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
			fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_iget", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_iget", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
						 inode_get_atime_sec(inode));
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
						 inode_get_ctime_sec(inode));
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
						 inode_get_mtime_sec(inode));
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
	ufs_inode->ui_atimensec = cpu_to_fs32(sb,
					      inode_get_atime_nsec(inode));
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
					      inode_get_ctime_nsec(inode));
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
					      inode_get_mtime_nsec(inode));

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode *inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse(bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode(struct inode *inode)
{
	return ufs_update_inode(inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

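/*
 * Context for coalescing frees: free_data() batches runs of physically
 * contiguous blocks so that ufs_free_blocks() is called once per run;
 * a final free_data(ctx, 0, 0) flushes whatever is still pending.
 */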
struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

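/* first fragment past the data covered by i_size */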
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb;
	struct ufs_sb_private_info *uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks(frag2);
		block2 = ufs_fragstoblks(frag3);
	}
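
	/*
	 * At this point [frag1, frag2) is the partial head to free,
	 * [block1, block2) the range of whole blocks, and [frag3, frag4)
	 * the partial tail; if everything fits within one block, only
	 * [frag1, frag2) remains and frag3 == frag4 == 0.
	 */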

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum(frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag4 = ufs_fragnum(frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments(inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

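/*
 * Free an entire indirect branch.  @depth counts the remaining levels
 * of indirection: at depth 1 the block holds pointers to data blocks,
 * at greater depths it holds pointers to further indirect blocks.  The
 * indirect block itself is released last.
 */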
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

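/*
 * Make sure the block that will hold the last fragment at @size is
 * mapped and up to date before the size changes; for fragments past
 * the direct area the remaining fragments of that block are zeroed on
 * disk as well.
 */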
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct folio *folio;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	folio = ufs_get_locked_folio(mapping, lastfrag >>
				     (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(folio)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = folio_buffers(folio);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * we do not zero the fragment: if it is mapped to a
		 * hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		folio_mark_dirty(folio);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_folio(folio);
out:
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;
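
	/*
	 * Levels deeper than depth2 end on the last slot of their
	 * indirect block, so only the first depth2 levels of the path
	 * need partial emptying; whole branches beyond the path are
	 * freed with free_full_branch() below.
	 */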

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};