// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page, e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
 * it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
 * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
 * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> read_folio"). In case of readahead, the @I_SYNC flag
 * is not set either. However, UBIFS disables readahead.
 */
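
/*
 * A note on geometry: UBIFS addresses file data in UBIFS_BLOCK_SIZE units
 * (4096 bytes, as UBIFS_BLOCK_SHIFT is 12). With 4 KiB pages this means
 * UBIFS_BLOCKS_PER_PAGE == 1 and UBIFS_BLOCKS_PER_PAGE_SHIFT == 0, so the
 * per-page block loops below run only once; with larger pages (or large
 * folios) a single page covers several UBIFS blocks and the loops iterate
 * over all of them.
 */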

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
{
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err, len, out_len;
        union ubifs_key key;
        unsigned int dlen;

        data_key_init(c, &key, inode->i_ino, block);
        err = ubifs_tnc_lookup(c, &key, dn);
        if (err) {
                if (err == -ENOENT)
                        /* Not found, so it must be a hole */
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                return err;
        }

        ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
                     ubifs_inode(inode)->creat_sqnum);
        len = le32_to_cpu(dn->size);
        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                goto dump;

        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

        if (IS_ENCRYPTED(inode)) {
                err = ubifs_decrypt(inode, dn, &dlen, block);
                if (err)
                        goto dump;
        }

        out_len = UBIFS_BLOCK_SIZE;
        err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
                               le16_to_cpu(dn->compr_type));
        if (err || len != out_len)
                goto dump;

        /*
         * Data length can be less than a full block, even for blocks that are
         * not the last in the file (e.g., as a result of making a hole and
         * appending data). Ensure that the remainder is zeroed out.
         */
        if (len < UBIFS_BLOCK_SIZE)
                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

        return 0;

dump:
        ubifs_err(c, "bad data node (block %u, inode %lu)",
                  block, inode->i_ino);
        ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
        return -EINVAL;
}

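/**
 * do_readpage - read a folio worth of data from the media.
 * @folio: folio to read into
 *
 * Reads every UBIFS block backing @folio via 'read_block()', zeroing blocks
 * which lie beyond the inode size or which turn out to be holes. On success
 * the folio is marked up-to-date.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */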
static int do_readpage(struct folio *folio)
{
        void *addr;
        int err = 0, i;
        unsigned int block, beyond;
        struct ubifs_data_node *dn = NULL;
        struct inode *inode = folio->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        loff_t i_size = i_size_read(inode);

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, folio->index, i_size, folio->flags);
        ubifs_assert(c, !folio_test_checked(folio));
        ubifs_assert(c, !folio->private);

        addr = kmap_local_folio(folio, 0);

        block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
        if (block >= beyond) {
                /* Reading beyond inode */
                folio_set_checked(folio);
                addr = folio_zero_tail(folio, 0, addr);
                goto out;
        }

        dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
        if (!dn) {
                err = -ENOMEM;
                goto out;
        }

        i = 0;
        while (1) {
                int ret;

                if (block >= beyond) {
                        /* Reading beyond inode */
                        err = -ENOENT;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else {
                        ret = read_block(inode, addr, block, dn);
                        if (ret) {
                                err = ret;
                                if (err != -ENOENT)
                                        break;
                        } else if (block + 1 == beyond) {
                                int dlen = le32_to_cpu(dn->size);
                                int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

                                if (ilen && ilen < dlen)
                                        memset(addr + ilen, 0, dlen - ilen);
                        }
                }
                if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
                        break;
                block += 1;
                addr += UBIFS_BLOCK_SIZE;
                if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
                        kunmap_local(addr - UBIFS_BLOCK_SIZE);
                        addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
                }
        }

        if (err) {
                if (err == -ENOENT) {
                        /* Not found, so it must be a hole */
                        folio_set_checked(folio);
                        dbg_gen("hole");
                        err = 0;
                } else {
                        ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
                                  folio->index, inode->i_ino, err);
                }
        }

out:
        kfree(dn);
        if (!err)
                folio_mark_uptodate(folio);
        flush_dcache_folio(folio);
        kunmap_local(addr);
        return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

        ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};

        ubifs_release_budget(c, &req);
}

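/**
 * write_begin_slow - the slow path of 'ubifs_write_begin()'.
 * @mapping: address space the write targets
 * @pos: position in the file to write to
 * @len: number of bytes to write
 * @pagep: the locked page is stored here on success
 *
 * This path budgets pessimistically (as for a new page written to a hole)
 * before taking the folio lock, so budgeting is free to force write-back,
 * and then amends the budget once the folio state is known.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */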
static int write_begin_slow(struct address_space *mapping,
                            loff_t pos, unsigned len, struct page **pagep)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        pgoff_t index = pos >> PAGE_SHIFT;
        struct ubifs_budget_req req = { .new_page = 1 };
        int err, appending = !!(pos + len > inode->i_size);
        struct folio *folio;

        dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
                inode->i_ino, pos, len, inode->i_size);

        /*
         * At the slow path we have to budget before locking the folio,
         * because budgeting may force write-back, which would wait on locked
         * folios and deadlock if we had the folio locked. At this point we do
         * not know anything about the folio, so assume that this is a new
         * folio which is written to a hole. This corresponds to the largest
         * budget. Later the budget will be amended if this turns out not to
         * be true.
         */
        if (appending)
                /* We are appending data, budget for inode change */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err))
                return err;

        folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
                                    mapping_gfp_mask(mapping));
        if (IS_ERR(folio)) {
                ubifs_release_budget(c, &req);
                return PTR_ERR(folio);
        }

        if (!folio_test_uptodate(folio)) {
                if (pos == folio_pos(folio) && len >= folio_size(folio))
                        folio_set_checked(folio);
                else {
                        err = do_readpage(folio);
                        if (err) {
                                folio_unlock(folio);
                                folio_put(folio);
                                ubifs_release_budget(c, &req);
                                return err;
                        }
                }
        }

        if (folio->private)
                /*
                 * The folio is dirty, which means it was budgeted twice:
                 *   o first time the budget was allocated by the task which
                 *     made the folio dirty and set the private field;
                 *   o and then we budgeted for it for the second time at the
                 *     very beginning of this function.
                 *
                 * So what we have to do is to release the folio budget we
                 * allocated.
                 */
                release_new_page_budget(c);
        else if (!folio_test_checked(folio))
                /*
                 * We are changing a folio which already exists on the media.
                 * This means that changing the folio does not make the amount
                 * of indexing information larger, and this part of the budget
                 * which we have already acquired may be released.
                 */
                ubifs_convert_page_budget(c);

        if (appending) {
                struct ubifs_inode *ui = ubifs_inode(inode);

                /*
                 * 'ubifs_write_end()' is optimized from the fast-path part of
                 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
                 * if data is appended.
                 */
                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The inode is dirty already, so we may free the
                         * budget we allocated.
                         */
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        *pagep = &folio->page;
        return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @folio: folio to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending.
 *
 * Returns: %0 in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct folio *folio,
                           struct ubifs_inode *ui, int appending)
{
        struct ubifs_budget_req req = { .fast = 1 };

        if (folio->private) {
                if (!appending)
                        /*
                         * The folio is dirty and we are not appending, which
                         * means no budget is needed at all.
                         */
                        return 0;

                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The page is dirty and we are appending, so the inode
                         * has to be marked as dirty. However, it is already
                         * dirty, so we do not need any budget. We may return,
                         * but @ui->ui_mutex has to be left locked because we
                         * should prevent write-back from flushing the inode
                         * and freeing the budget. The lock will be released in
                         * 'ubifs_write_end()'.
                         */
                        return 0;

                /*
                 * The page is dirty, we are appending, the inode is clean, so
                 * we need to budget the inode change.
                 */
                req.dirtied_ino = 1;
        } else {
                if (folio_test_checked(folio))
                        /*
                         * The page corresponds to a hole and does not exist on
                         * the media. So changing it makes the amount of
                         * indexing information larger, and we have to budget
                         * for a new page.
                         */
                        req.new_page = 1;
                else
                        /*
                         * Not a hole, the change will not add any new indexing
                         * information, budget for page change.
                         */
                        req.dirtied_page = 1;

                if (appending) {
                        mutex_lock(&ui->ui_mutex);
                        if (!ui->dirty)
                                /*
                                 * The inode is clean but we will have to mark
                                 * it as dirty because we are appending. This
                                 * needs a budget.
                                 */
                                req.dirtied_ino = 1;
                }
        }

        return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
 *
 * There are many budgeting cases:
 *     o a new page is appended - we have to budget for a new page and for
 *       changing the inode; however, if the inode is already dirty, there is
 *       no need to budget for it;
 *     o an existing clean page is changed - we have to budget for it; if the
 *       page does not exist on the media (a hole), we have to budget for a
 *       new page; otherwise, we may budget for changing an existing page; the
 *       difference between these cases is that changing an existing page does
 *       not introduce anything new to the FS indexing information, so it does
 *       not grow, and smaller budget is acquired in this case;
 *     o an existing dirty page is changed - no need to budget at all, because
 *       the page budget was acquired earlier, when the page was marked dirty.
 *
 * The UBIFS budgeting sub-system may force write-back if it thinks there is
 * no space to reserve. This imposes locking restrictions which make it
 * impossible to distinguish the above cases up front and optimize the
 * budgeting accordingly.
 *
 * The solution is that the fast path of 'ubifs_write_begin()' assumes there
 * is plenty of flash space and the budget will be acquired quickly, without
 * forcing write-back. The slow path does not make this assumption.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = pos >> PAGE_SHIFT;
        int err, appending = !!(pos + len > inode->i_size);
        int skipped_read = 0;
        struct folio *folio;

        ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
        ubifs_assert(c, !c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return -EROFS;

        /* Try out the fast-path part first */
        folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
                                    mapping_gfp_mask(mapping));
        if (IS_ERR(folio))
                return PTR_ERR(folio);

        if (!folio_test_uptodate(folio)) {
                /* The page is not loaded from the flash */
                if (pos == folio_pos(folio) && len >= folio_size(folio)) {
                        /*
                         * We change the whole page so there is no need to
                         * load it. But we do not know whether this page
                         * exists on the media or not, so we assume the latter
                         * because it requires larger budget. The assumption
                         * is that it is better to budget a bit more than to
                         * read the page from the media. Thus, we are setting
                         * the @PG_checked flag here.
                         */
                        folio_set_checked(folio);
                        skipped_read = 1;
                } else {
                        err = do_readpage(folio);
                        if (err) {
                                folio_unlock(folio);
                                folio_put(folio);
                                return err;
                        }
                }
        }

        err = allocate_budget(c, folio, ui, appending);
        if (unlikely(err)) {
                ubifs_assert(c, err == -ENOSPC);
                /*
                 * If we skipped reading the page because we were going to
                 * write all of it, then it is not up to date.
                 */
                if (skipped_read)
                        folio_clear_checked(folio);
                /*
                 * Budgeting failed which means it would have to force
                 * write-back but didn't, because we set the @fast flag in the
                 * request. Write-back cannot be done now, while we have the
                 * page locked, because it would deadlock. Unlock and free
                 * everything and fall back to the slow path.
                 */
                if (appending) {
                        ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
                        mutex_unlock(&ui->ui_mutex);
                }
                folio_unlock(folio);
                folio_put(folio);

                return write_begin_slow(mapping, pos, len, pagep);
        }

        /*
         * Whee, we acquired budgeting quickly - without involving
         * garbage-collection, committing or forcing write-back. We return
         * with @ui->ui_mutex locked if we are appending pages, and unlocked
         * otherwise. This is an optimization (slightly hacky though).
         */
        *pagep = &folio->page;
        return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @folio: folio to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct folio *folio,
                          struct ubifs_inode *ui, int appending)
{
        if (appending) {
                if (!ui->dirty)
                        ubifs_release_dirty_inode_budget(c, ui);
                mutex_unlock(&ui->ui_mutex);
        }
        if (!folio->private) {
                if (folio_test_checked(folio))
                        release_new_page_budget(c);
                else
                        release_existing_page_budget(c);
        }
}

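/**
 * ubifs_write_end - conclude a write started by 'ubifs_write_begin()'.
 * @file: file the write is for
 * @mapping: address space the write targets
 * @pos: position in the file the write started at
 * @len: number of bytes 'ubifs_write_begin()' was told would be written
 * @copied: number of bytes actually copied into the page
 * @page: the page written to
 * @fsdata: unused
 *
 * Marks the folio dirty, updates the inode size when appending, and releases
 * the @ui_mutex taken by 'ubifs_write_begin()'. If less data was copied than
 * promised and the folio was never read in, the budget is cancelled, the
 * folio is read now, and %0 (or the read error) is returned so that VFS
 * repeats the whole operation.
 */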
static int ubifs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct folio *folio = page_folio(page);
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        loff_t end_pos = pos + len;
        int appending = !!(end_pos > inode->i_size);

        dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
                inode->i_ino, pos, folio->index, len, copied, inode->i_size);

        if (unlikely(copied < len && !folio_test_uptodate(folio))) {
                /*
                 * VFS copied less data to the folio than it intended and
                 * declared in its '->write_begin()' call via the @len
                 * argument. If the folio was not up-to-date,
                 * 'ubifs_write_begin()' did not load it from the media (for
                 * optimization reasons), which means that part of the folio
                 * contains garbage. So read the folio now.
                 */
                dbg_gen("copied %d instead of %d, read page and repeat",
                        copied, len);
                cancel_budget(c, folio, ui, appending);
                folio_clear_checked(folio);

                /*
                 * Return 0 to force VFS to repeat the whole operation, or the
                 * error code if 'do_readpage()' fails.
                 */
                copied = do_readpage(folio);
                goto out;
        }

        if (len == folio_size(folio))
                folio_mark_uptodate(folio);

        if (!folio->private) {
                folio_attach_private(folio, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                filemap_dirty_folio(mapping, folio);
        }

        if (appending) {
                i_size_write(inode, end_pos);
                ui->ui_size = end_pos;
                /*
                 * We do not set @I_DIRTY_PAGES (which means that the inode has
                 * dirty pages), because this was already done in
                 * filemap_dirty_folio().
                 */
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
                ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
                mutex_unlock(&ui->ui_mutex);
        }

out:
        folio_unlock(folio);
        folio_put(folio);
        return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @folio: folio
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * Returns: %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct folio *folio,
                         struct bu_info *bu, int *n)
{
        int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
        struct inode *inode = folio->mapping->host;
        loff_t i_size = i_size_read(inode);
        unsigned int page_block;
        void *addr, *zaddr;
        pgoff_t end_index;

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, folio->index, i_size, folio->flags);

        addr = zaddr = kmap_local_folio(folio, 0);

        end_index = (i_size - 1) >> PAGE_SHIFT;
        if (!i_size || folio->index > end_index) {
                hole = 1;
                addr = folio_zero_tail(folio, 0, addr);
                goto out_hole;
        }

        page_block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        while (1) {
                int err, len, out_len, dlen;

                if (nn >= bu->cnt) {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
                        struct ubifs_data_node *dn;

                        dn = bu->buf + (bu->zbranch[nn].offs - offs);

                        ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
                                     ubifs_inode(inode)->creat_sqnum);

                        len = le32_to_cpu(dn->size);
                        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                                goto out_err;

                        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
                        out_len = UBIFS_BLOCK_SIZE;

                        if (IS_ENCRYPTED(inode)) {
                                err = ubifs_decrypt(inode, dn, &dlen, page_block);
                                if (err)
                                        goto out_err;
                        }

                        err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
                                               le16_to_cpu(dn->compr_type));
                        if (err || len != out_len)
                                goto out_err;

                        if (len < UBIFS_BLOCK_SIZE)
                                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

                        nn += 1;
                        read = (i << UBIFS_BLOCK_SHIFT) + len;
                } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
                        nn += 1;
                        continue;
                } else {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                addr += UBIFS_BLOCK_SIZE;
                page_block += 1;
                if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
                        kunmap_local(addr - UBIFS_BLOCK_SIZE);
                        addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
                }
        }

        if (end_index == folio->index) {
                int len = i_size & (PAGE_SIZE - 1);

                if (len && len < read)
                        memset(zaddr + len, 0, read - len);
        }

out_hole:
        if (hole) {
                folio_set_checked(folio);
                dbg_gen("hole");
        }

        folio_mark_uptodate(folio);
        flush_dcache_folio(folio);
        kunmap_local(addr);
        *n = nn;
        return 0;

out_err:
        flush_dcache_folio(folio);
        kunmap_local(addr);
        ubifs_err(c, "bad data node (block %u, inode %lu)",
                  page_block, inode->i_ino);
        return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @folio1: first folio to read
 *
 * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
                              struct folio *folio1)
{
        pgoff_t offset = folio1->index, end_index;
        struct address_space *mapping = folio1->mapping;
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        int err, page_idx, page_cnt, ret = 0, n = 0;
        int allocate = bu->buf ? 0 : 1;
        loff_t isize;
        gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

        err = ubifs_tnc_get_bu_keys(c, bu);
        if (err)
                goto out_warn;

        if (bu->eof) {
                /* Turn off bulk-read at the end of the file */
                ui->read_in_a_row = 1;
                ui->bulk_read = 0;
        }

        page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
        if (!page_cnt) {
                /*
                 * This happens when there are multiple blocks per page and the
                 * blocks for the first page we are looking for are not
                 * together. If all the pages were like this, bulk-read would
                 * reduce performance, so we turn it off for a while.
                 */
                goto out_bu_off;
        }

        if (bu->cnt) {
                if (allocate) {
                        /*
                         * Allocate bulk-read buffer depending on how many data
                         * nodes we are going to read.
                         */
                        bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
                                      bu->zbranch[bu->cnt - 1].len -
                                      bu->zbranch[0].offs;
                        ubifs_assert(c, bu->buf_len > 0);
                        ubifs_assert(c, bu->buf_len <= c->leb_size);
                        bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
                        if (!bu->buf)
                                goto out_bu_off;
                }

                err = ubifs_tnc_bulk_read(c, bu);
                if (err)
                        goto out_warn;
        }

        err = populate_page(c, folio1, bu, &n);
        if (err)
                goto out_warn;

        folio_unlock(folio1);
        ret = 1;

        isize = i_size_read(inode);
        if (isize == 0)
                goto out_free;
        end_index = ((isize - 1) >> PAGE_SHIFT);

        for (page_idx = 1; page_idx < page_cnt; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
                struct folio *folio;

                if (page_offset > end_index)
                        break;
                folio = __filemap_get_folio(mapping, page_offset,
                                FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
                                ra_gfp_mask);
                if (IS_ERR(folio))
                        break;
                if (!folio_test_uptodate(folio))
                        err = populate_page(c, folio, bu, &n);
                folio_unlock(folio);
                folio_put(folio);
                if (err)
                        break;
        }

        ui->last_page_read = offset + page_idx - 1;

out_free:
        if (allocate)
                kfree(bu->buf);
        return ret;

out_warn:
        ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
        goto out_free;

out_bu_off:
        ui->read_in_a_row = ui->bulk_read = 0;
        goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @folio: folio from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. The
 * UBIFS bulk-read facility is designed to take advantage of that, by reading
 * in one go consecutive data nodes that are also located consecutively in the
 * same LEB.
 *
 * Returns: %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct folio *folio)
{
        struct inode *inode = folio->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = folio->index, last_page_read = ui->last_page_read;
        struct bu_info *bu;
        int err = 0, allocated = 0;

        ui->last_page_read = index;
        if (!c->bulk_read)
                return 0;

        /*
         * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
         * so don't bother if we cannot lock the mutex.
         */
        if (!mutex_trylock(&ui->ui_mutex))
                return 0;

        if (index != last_page_read + 1) {
                /* Turn off bulk-read if we stop reading sequentially */
                ui->read_in_a_row = 1;
                if (ui->bulk_read)
                        ui->bulk_read = 0;
                goto out_unlock;
        }

        if (!ui->bulk_read) {
                ui->read_in_a_row += 1;
                if (ui->read_in_a_row < 3)
                        goto out_unlock;
                /* Three reads in a row, so switch on bulk-read */
                ui->bulk_read = 1;
        }

        /*
         * If possible, try to use pre-allocated bulk-read information, which
         * is protected by @c->bu_mutex.
         */
        if (mutex_trylock(&c->bu_mutex))
                bu = &c->bu;
        else {
                bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
                if (!bu)
                        goto out_unlock;

                bu->buf = NULL;
                allocated = 1;
        }

        bu->buf_len = c->max_bu_buf_len;
        data_key_init(c, &bu->key, inode->i_ino,
                      folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
        err = ubifs_do_bulk_read(c, bu, folio);

        if (!allocated)
                mutex_unlock(&c->bu_mutex);
        else
                kfree(bu);

out_unlock:
        mutex_unlock(&ui->ui_mutex);
        return err;
}

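/*
 * Note that '->read_folio()' always returns %0 here: if bulk-read does not
 * apply and 'do_readpage()' fails, the folio is simply left not up-to-date,
 * and the caller observes the failure as an I/O error.
 */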
static int ubifs_read_folio(struct file *file, struct folio *folio)
{
        if (ubifs_bulk_read(folio))
                return 0;
        do_readpage(folio);
        folio_unlock(folio);
        return 0;
}

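/**
 * do_writepage - write a folio worth of data to the journal.
 * @folio: folio to write
 * @len: number of bytes to write, starting from the folio beginning
 *
 * Writes the data backing @folio to the journal block by block, releases the
 * page budget and detaches the folio's private flag. Called with the folio
 * locked; unlocks it and ends write-back before returning.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */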
static int do_writepage(struct folio *folio, size_t len)
{
        int err = 0, blen;
        unsigned int block;
        void *addr;
        size_t offset = 0;
        union ubifs_key key;
        struct inode *inode = folio->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
        struct ubifs_inode *ui = ubifs_inode(inode);
        spin_lock(&ui->ui_lock);
        ubifs_assert(c, folio->index <= ui->synced_i_size >> PAGE_SHIFT);
        spin_unlock(&ui->ui_lock);
#endif

        folio_start_writeback(folio);

        addr = kmap_local_folio(folio, offset);
        block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        for (;;) {
                blen = min_t(size_t, len, UBIFS_BLOCK_SIZE);
                data_key_init(c, &key, inode->i_ino, block);
                err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
                if (err)
                        break;
                len -= blen;
                if (!len)
                        break;
                block += 1;
                addr += blen;
                if (folio_test_highmem(folio) && !offset_in_page(addr)) {
                        kunmap_local(addr - blen);
                        offset += PAGE_SIZE;
                        addr = kmap_local_folio(folio, offset);
                }
        }
        kunmap_local(addr);
        if (err) {
                mapping_set_error(folio->mapping, err);
                ubifs_err(c, "cannot write folio %lu of inode %lu, error %d",
                          folio->index, inode->i_ino, err);
                ubifs_ro_mode(c, err);
        }

        ubifs_assert(c, folio->private != NULL);
        if (folio_test_checked(folio))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        folio_detach_private(folio);
        folio_clear_checked(folio);

        folio_unlock(folio);
        folio_end_writeback(folio);
        return err;
}

/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation when we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which would take a long time
 * and is unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains the current inode
 * size, and then keeps writing pages back.
 *
 * Some locking issues explanation. 'ubifs_writepage()' is first called with
 * the page locked, and it locks @ui_mutex. However, write-back does take inode
 * @i_mutex, which means other VFS operations may be run on this inode at the
 * same time. And the problematic one is truncation to smaller size, from where
 * we have to call 'truncate_setsize()', which first changes @inode->i_size,
 * then drops the truncated pages. And while dropping the pages, it takes the
 * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()'
 * with @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'.
 * This means that @inode->i_size is changed while @ui_mutex is unlocked.
 *
 * XXX(truncate): with the new truncate sequence this is not true anymore,
 * and the calls to truncate_setsize can be moved around freely. They should
 * be moved to the very end of the truncate sequence.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * inode size. How do we do this if @inode->i_size may become smaller while we
 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_size "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 *    situation when the inode is truncated while we are in the middle of
 *    'do_writepage()', so we do write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would be locked
 *    on the page lock and it would not write the truncated inode node to the
 *    journal before we have finished.
 */
static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
                           void *data)
{
        struct inode *inode = folio->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        loff_t i_size = i_size_read(inode), synced_i_size;
        int err, len = folio_size(folio);

        dbg_gen("ino %lu, pg %lu, pg flags %#lx",
                inode->i_ino, folio->index, folio->flags);
        ubifs_assert(c, folio->private != NULL);

        /* Is the folio fully outside @i_size? (truncate in progress) */
        if (folio_pos(folio) >= i_size) {
                err = 0;
                goto out_unlock;
        }

        spin_lock(&ui->ui_lock);
        synced_i_size = ui->synced_i_size;
        spin_unlock(&ui->ui_lock);

        /* Is the folio fully inside i_size? */
        if (folio_pos(folio) + len <= i_size) {
                if (folio_pos(folio) >= synced_i_size) {
                        err = inode->i_sb->s_op->write_inode(inode, NULL);
                        if (err)
                                goto out_redirty;
                        /*
                         * The inode has been written, but the write-buffer has
                         * not been synchronized, so in case of an unclean
                         * reboot we may end up with some pages beyond inode
                         * size, but they would be in the journal (because
                         * commit flushes write buffers) and recovery would deal
                         * with this.
                         */
                }
                return do_writepage(folio, len);
        }

        /*
         * The folio straddles @i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        len = i_size - folio_pos(folio);
        folio_zero_segment(folio, len, folio_size(folio));

        if (i_size > synced_i_size) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out_redirty;
        }

        return do_writepage(folio, len);
out_redirty:
        /*
         * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
         * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
         * there is no need to do space budget for dirty inode.
         */
        folio_redirty_for_writepage(wbc, folio);
out_unlock:
        folio_unlock(folio);
        return err;
}

static int ubifs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        return write_cache_pages(mapping, wbc, ubifs_writepage, NULL);
}

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
        if (attr->ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (attr->ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (attr->ia_valid & ATTR_ATIME)
                inode_set_atime_to_ts(inode, attr->ia_atime);
        if (attr->ia_valid & ATTR_MTIME)
                inode_set_mtime_to_ts(inode, attr->ia_mtime);
        if (attr->ia_valid & ATTR_CTIME)
                inode_set_ctime_to_ts(inode, attr->ia_ctime);
        if (attr->ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
                inode->i_mode = mode;
        }
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
                         const struct iattr *attr)
{
        int err;
        struct ubifs_budget_req req;
        loff_t old_size = inode->i_size, new_size = attr->ia_size;
        int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
        struct ubifs_inode *ui = ubifs_inode(inode);

        dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
        memset(&req, 0, sizeof(struct ubifs_budget_req));

        /*
         * If this is truncation to a smaller size, and we do not truncate on a
         * block boundary, budget for changing one data block, because the last
         * block will be re-written.
         */
        if (new_size & (UBIFS_BLOCK_SIZE - 1))
                req.dirtied_page = 1;

        req.dirtied_ino = 1;
        /* A funny way to budget for truncation node */
        req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
        err = ubifs_budget_space(c, &req);
        if (err) {
                /*
                 * Treat truncations to zero as deletion and always allow them,
                 * just like we do for '->unlink()'.
                 */
                if (new_size || err != -ENOSPC)
                        return err;
                budgeted = 0;
        }

        truncate_setsize(inode, new_size);

        if (offset) {
                pgoff_t index = new_size >> PAGE_SHIFT;
                struct folio *folio;

                folio = filemap_lock_folio(inode->i_mapping, index);
                if (!IS_ERR(folio)) {
                        if (folio_test_dirty(folio)) {
                                /*
                                 * 'ubifs_jnl_truncate()' will try to truncate
                                 * the last data node, but it contains
                                 * out-of-date data because the page is dirty.
                                 * Write the page now, so that
                                 * 'ubifs_jnl_truncate()' will see an already
                                 * truncated (and up to date) data node.
                                 */
                                ubifs_assert(c, folio->private != NULL);

                                folio_clear_dirty_for_io(folio);
                                if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
                                        offset = offset_in_folio(folio,
                                                                 new_size);
                                err = do_writepage(folio, offset);
                                folio_put(folio);
                                if (err)
                                        goto out_budg;
                                /*
                                 * We could now tell 'ubifs_jnl_truncate()' not
                                 * to read the last block.
                                 */
                        } else {
                                /*
                                 * We could 'kmap()' the page and pass the data
                                 * to 'ubifs_jnl_truncate()' to save it from
                                 * having to read it.
                                 */
                                folio_unlock(folio);
                                folio_put(folio);
                        }
                }
        }

        mutex_lock(&ui->ui_mutex);
        ui->ui_size = inode->i_size;
        /* Truncation changes inode [mc]time */
        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        /* Other attributes may be changed at the same time as well */
        do_attr_changes(inode, attr);
        err = ubifs_jnl_truncate(c, inode, old_size, new_size);
        mutex_unlock(&ui->ui_mutex);

out_budg:
        if (budgeted)
                ubifs_release_budget(c, &req);
        else {
                c->bi.nospace = c->bi.nospace_rp = 0;
                smp_wmb();
        }
        return err;
}

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
                      const struct iattr *attr)
{
        int err, release;
        loff_t new_size = attr->ia_size;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(ui->data_len, 8) };

        err = ubifs_budget_space(c, &req);
        if (err)
                return err;

        if (attr->ia_valid & ATTR_SIZE) {
                dbg_gen("size %lld -> %lld", inode->i_size, new_size);
                truncate_setsize(inode, new_size);
        }

        mutex_lock(&ui->ui_mutex);
        if (attr->ia_valid & ATTR_SIZE) {
                /* Truncation changes inode [mc]time */
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
                /* 'truncate_setsize()' changed @i_size, update @ui_size */
                ui->ui_size = inode->i_size;
        }

        do_attr_changes(inode, attr);

        release = ui->dirty;
        if (attr->ia_valid & ATTR_SIZE)
                /*
                 * Inode length changed, so we have to make sure
                 * @I_DIRTY_DATASYNC is set.
                 */
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        else
                mark_inode_dirty_sync(inode);
        mutex_unlock(&ui->ui_mutex);

        if (release)
                ubifs_release_budget(c, &req);
        if (IS_SYNC(inode))
                err = inode->i_sb->s_op->write_inode(inode, NULL);
        return err;
}

int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                  struct iattr *attr)
{
        int err;
        struct inode *inode = d_inode(dentry);
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        dbg_gen("ino %lu, mode %#x, ia_valid %#x",
                inode->i_ino, inode->i_mode, attr->ia_valid);
        err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
        if (err)
                return err;

        err = dbg_check_synced_i_size(c, inode);
        if (err)
                return err;

        err = fscrypt_prepare_setattr(dentry, attr);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
                /* Truncation to a smaller size */
                err = do_truncation(c, inode, attr);
        else
                err = do_setattr(c, inode, attr);

        return err;
}

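/*
 * A dirty folio is being dropped from the page cache. If the whole folio goes
 * away, the budget taken when it was dirtied must be released here, because
 * 'do_writepage()' will never run for it.
 */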
static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
                                   size_t length)
{
        struct inode *inode = folio->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        ubifs_assert(c, folio_test_private(folio));
        if (offset || length < folio_size(folio))
                /* Partial folio remains dirty */
                return;

        if (folio_test_checked(folio))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        folio_detach_private(folio);
        folio_clear_checked(folio);
}

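/*
 * Flush all dirty pages of the inode, write the inode itself unless this is a
 * pure data sync, and finally flush the write-buffers which may still hold
 * nodes belonging to this inode.
 */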
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err;

        dbg_gen("syncing inode %lu", inode->i_ino);

        if (c->ro_mount)
                /*
                 * For some really strange reasons VFS does not filter out
                 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
                 */
                return 0;

        err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;
        inode_lock(inode);

        /* Synchronize the inode unless this is a 'datasync()' call. */
        if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out;
        }

        /*
         * Nodes related to this inode may still sit in a write-buffer. Flush
         * them.
         */
        err = ubifs_sync_wbufs_by_inode(c, inode);
out:
        inode_unlock(inode);
        return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 *
 * Returns: %1 if a time update is needed, %0 if not.
 */
static inline int mctime_update_needed(const struct inode *inode,
                                       const struct timespec64 *now)
{
        struct timespec64 ctime = inode_get_ctime(inode);
        struct timespec64 mtime = inode_get_mtime(inode);

        if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
                return 1;
        return 0;
}

/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @flags: time updating control flags, which determine the time fields of
 *         @inode to update
 *
 * This function updates the time of the inode.
 *
 * Returns: %0 for success or a negative error code otherwise.
 */
int ubifs_update_time(struct inode *inode, int flags)
{
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(ui->data_len, 8) };
        int err, release;

        if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
                generic_update_time(inode, flags);
                return 0;
        }

        err = ubifs_budget_space(c, &req);
        if (err)
                return err;

        mutex_lock(&ui->ui_mutex);
        inode_update_timestamps(inode, flags);
        release = ui->dirty;
        __mark_inode_dirty(inode, I_DIRTY_SYNC);
        mutex_unlock(&ui->ui_mutex);
        if (release)
                ubifs_release_budget(c, &req);
        return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates the mtime and ctime of the inode if they differ from
 * the current time.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */
static int update_mctime(struct inode *inode)
{
        struct timespec64 now = current_time(inode);
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        if (mctime_update_needed(inode, &now)) {
                int err, release;
                struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

                err = ubifs_budget_space(c, &req);
                if (err)
                        return err;

                mutex_lock(&ui->ui_mutex);
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_budget(c, &req);
        }

        return 0;
}

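/*
 * Bring mtime/ctime up to date (with proper budgeting) before handing the
 * write off to the generic path; see 'update_mctime()' above.
 */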
static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        int err = update_mctime(file_inode(iocb->ki_filp));

        if (err)
                return err;

        return generic_file_write_iter(iocb, from);
}

static bool ubifs_dirty_folio(struct address_space *mapping,
                              struct folio *folio)
{
        bool ret;
        struct ubifs_info *c = mapping->host->i_sb->s_fs_info;

        ret = filemap_dirty_folio(mapping, folio);
        /*
         * An attempt to dirty a page without budgeting for it - should not
         * happen.
         */
        ubifs_assert(c, ret == false);
        return ret;
}

static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
{
        struct inode *inode = folio->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        if (folio_test_writeback(folio))
                return false;

        /*
         * The folio is private but not dirty - weird? There is one way this
         * can legitimately happen: 'ubifs_writepage()' skipped the folio
         * because its index is beyond i_size (for example, the file was
         * truncated by another process, A), and then the folio is invalidated
         * by an fadvise64 syscall before being truncated by process A.
         */
        ubifs_assert(c, folio_test_private(folio));
        if (folio_test_checked(folio))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        folio_detach_private(folio);
        folio_clear_checked(folio);
        return true;
}


/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable. UBIFS must ensure the page is budgeted for.
 */
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct folio *folio = page_folio(vmf->page);
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct timespec64 now = current_time(inode);
        struct ubifs_budget_req req = { .new_page = 1 };
        int err, update_time;

        dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, folio->index,
                i_size_read(inode));
        ubifs_assert(c, !c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return VM_FAULT_SIGBUS; /* -EROFS */

        /*
         * We have not locked @folio so far so we may budget for changing the
         * folio. Note, we cannot do this after we locked the folio, because
         * budgeting may cause write-back which would cause deadlock.
         *
         * At the moment we do not know whether the folio is dirty or not, so
         * we assume that it is not and budget for a new folio. We could look
         * at the @PG_private flag and figure this out, but we may race with
         * write-back and the folio state may change by the time we lock it,
         * so this would need additional care. We do not bother with this at
         * the moment, although it might be a good idea to do so. Instead, we
         * allocate budget for a new folio and amend it later on if the folio
         * was in fact dirty.
         *
         * The budgeting-related logic of this function is similar to what we
         * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
         * for more comments.
         */
        update_time = mctime_update_needed(inode, &now);
        if (update_time)
                /*
                 * We have to change inode time stamps which requires extra
                 * budgeting.
                 */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err)) {
                if (err == -ENOSPC)
                        ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
                                   inode->i_ino);
                return VM_FAULT_SIGBUS;
        }

        folio_lock(folio);
        if (unlikely(folio->mapping != inode->i_mapping ||
                     folio_pos(folio) >= i_size_read(inode))) {
                /* The folio got truncated out from underneath us */
                goto sigbus;
        }

        if (folio->private)
                release_new_page_budget(c);
        else {
                if (!folio_test_checked(folio))
                        ubifs_convert_page_budget(c);
                folio_attach_private(folio, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                filemap_dirty_folio(folio->mapping, folio);
        }

        if (update_time) {
                int release;
                struct ubifs_inode *ui = ubifs_inode(inode);

                mutex_lock(&ui->ui_mutex);
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        folio_wait_stable(folio);
        return VM_FAULT_LOCKED;

sigbus:
        folio_unlock(folio);
        ubifs_release_budget(c, &req);
        return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
        .fault = filemap_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int err;

        err = generic_file_mmap(file, vma);
        if (err)
                return err;
        vma->vm_ops = &ubifs_file_vm_ops;

        if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
                file_accessed(file);

        return 0;
}

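/*
 * For unencrypted symlinks the target is stored inline in @ui->data and can
 * be returned directly. For encrypted symlinks the target must be decrypted,
 * which may sleep, so the RCU walk case (@dentry == NULL) is declined with
 * -ECHILD.
 */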
static const char *ubifs_get_link(struct dentry *dentry,
                                  struct inode *inode,
                                  struct delayed_call *done)
{
        struct ubifs_inode *ui = ubifs_inode(inode);

        if (!IS_ENCRYPTED(inode))
                return ui->data;

        if (!dentry)
                return ERR_PTR(-ECHILD);

        return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}

static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
                                 const struct path *path, struct kstat *stat,
                                 u32 request_mask, unsigned int query_flags)
{
        ubifs_getattr(idmap, path, stat, request_mask, query_flags);

        if (IS_ENCRYPTED(d_inode(path->dentry)))
                return fscrypt_symlink_getattr(path, stat);
        return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
        .read_folio = ubifs_read_folio,
        .writepages = ubifs_writepages,
        .write_begin = ubifs_write_begin,
        .write_end = ubifs_write_end,
        .invalidate_folio = ubifs_invalidate_folio,
        .dirty_folio = ubifs_dirty_folio,
        .migrate_folio = filemap_migrate_folio,
        .release_folio = ubifs_release_folio,
};

const struct inode_operations ubifs_file_inode_operations = {
        .setattr = ubifs_setattr,
        .getattr = ubifs_getattr,
        .listxattr = ubifs_listxattr,
        .update_time = ubifs_update_time,
        .fileattr_get = ubifs_fileattr_get,
        .fileattr_set = ubifs_fileattr_set,
};

const struct inode_operations ubifs_symlink_inode_operations = {
        .get_link = ubifs_get_link,
        .setattr = ubifs_setattr,
        .getattr = ubifs_symlink_getattr,
        .listxattr = ubifs_listxattr,
        .update_time = ubifs_update_time,
};

const struct file_operations ubifs_file_operations = {
        .llseek = generic_file_llseek,
        .read_iter = generic_file_read_iter,
        .write_iter = ubifs_write_iter,
        .mmap = ubifs_file_mmap,
        .fsync = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
        .splice_read = filemap_splice_read,
        .splice_write = iter_file_splice_write,
        .open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
        .compat_ioctl = ubifs_compat_ioctl,
#endif
};