/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <linux/rw_hint.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif
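
/*
 * Usage note: with CONFIG_F2FS_CHECK_FS a failed assertion crashes via
 * BUG_ON(); otherwise it only warns and marks the filesystem for fsck.
 * An illustrative (not quoted) call site:
 *
 *	f2fs_bug_on(sbi, blkaddr < SEG0_BLKADDR(sbi));
 */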

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete, since bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_BLKADDR_VALIDITY,
	FAULT_BLKADDR_CONSISTENCE,
	FAULT_NO_SEGMENT,
	FAULT_INCONSISTENT_FOOTER,
	FAULT_TIMEOUT,
	FAULT_VMALLOC,
	FAULT_MAX,
};

/* indicate which option to update */
enum fault_option {
	FAULT_RATE	= 1,	/* only update fault rate */
	FAULT_TYPE	= 2,	/* only update fault type */
	FAULT_ALL	= 4,	/* reset all fault injection options/stats */
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info {
	atomic_t inject_ops;
	int inject_rate;
	unsigned int inject_type;
	/* Used to account total count of injection for each type */
	unsigned int inject_count[FAULT_MAX];
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & BIT(type))

/* maximum retry count for injected failure */
#define DEFAULT_FAILURE_RETRY_COUNT	8
#else
#define DEFAULT_FAILURE_RETRY_COUNT	1
#endif
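
/*
 * A hedged sketch of a typical injection site: the knobs above gate an
 * early-failure branch in an otherwise normal path, e.g. (using the
 * time_to_inject() helper that lives elsewhere in this header; shown
 * here purely as illustration):
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC))
 *		return NULL;	// pretend the allocation failed
 *	return kmalloc(size, flags);
 */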

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000001
#define F2FS_MOUNT_DISCARD		0x00000002
#define F2FS_MOUNT_NOHEAP		0x00000004
#define F2FS_MOUNT_XATTR_USER		0x00000008
#define F2FS_MOUNT_POSIX_ACL		0x00000010
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000020
#define F2FS_MOUNT_INLINE_XATTR		0x00000040
#define F2FS_MOUNT_INLINE_DATA		0x00000080
#define F2FS_MOUNT_INLINE_DENTRY	0x00000100
#define F2FS_MOUNT_FLUSH_MERGE		0x00000200
#define F2FS_MOUNT_NOBARRIER		0x00000400
#define F2FS_MOUNT_FASTBOOT		0x00000800
#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00001000
#define F2FS_MOUNT_DATA_FLUSH		0x00002000
#define F2FS_MOUNT_FAULT_INJECTION	0x00004000
#define F2FS_MOUNT_USRQUOTA		0x00008000
#define F2FS_MOUNT_GRPQUOTA		0x00010000
#define F2FS_MOUNT_PRJQUOTA		0x00020000
#define F2FS_MOUNT_QUOTA		0x00040000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00080000
#define F2FS_MOUNT_RESERVE_ROOT		0x00100000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x00200000
#define F2FS_MOUNT_NORECOVERY		0x00400000
#define F2FS_MOUNT_ATGC			0x00800000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x01000000
#define F2FS_MOUNT_GC_MERGE		0x02000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x04000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE	0x08000000
#define F2FS_MOUNT_NAT_BITS		0x10000000
#define F2FS_MOUNT_INLINECRYPT		0x20000000
/*
 * Some f2fs environments expect to be able to pass the "lazytime" option
 * string rather than using the MS_LAZYTIME flag, so this must remain.
 */
#define F2FS_MOUNT_LAZYTIME		0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
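
/*
 * Example (illustrative, not quoted from the option parser): mount-option
 * flags are toggled and tested by token name, and the ##option paste means
 * callers pass DISCARD rather than F2FS_MOUNT_DISCARD:
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD))
 *		...issue discard commands...
 *	clear_opt(sbi, DISCARD);
 */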

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
			typecheck(unsigned long long, b) &&		\
			((long long)((a) - (b)) > 0))
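
/*
 * ver_after() compares 64-bit version numbers safely across wraparound:
 * the unsigned subtraction is reinterpreted as signed, so with a = 0 and
 * b = ULLONG_MAX the difference (a - b) comes out as 1, and a is still
 * considered "after" b even though a < b numerically.
 */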

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

enum blkzone_allocation_policy {
	BLKZONE_ALLOC_PRIOR_SEQ,	/* Prioritize writing to sequential zones */
	BLKZONE_ALLOC_ONLY_SEQ,		/* Only allow writing to sequential zones */
	BLKZONE_ALLOC_PRIOR_CONV,	/* Prioritize writing to conventional zones */
};

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read lock
 * while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};
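
/*
 * A hedged sketch of the unfairness: the lock helpers defined later in
 * this header take the read side only via a trylock, parking readers on
 * read_waiters instead of in the rwsem's own queue, roughly:
 *
 *	wait_event(sem->read_waiters,
 *		   down_read_trylock(&sem->internal_rwsem));
 *
 * while the write-side unlock does wake_up_all(&sem->read_waiters), so a
 * waiting writer is never queued behind sleeping readers.
 */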

struct f2fs_mount_info {
	unsigned int opt;
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int memory_mode;		/* memory mode */
	int errors;			/* errors parameter */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};

#define F2FS_FEATURE_ENCRYPT			0x00000001
#define F2FS_FEATURE_BLKZONED			0x00000002
#define F2FS_FEATURE_ATOMIC_WRITE		0x00000004
#define F2FS_FEATURE_EXTRA_ATTR			0x00000008
#define F2FS_FEATURE_PRJQUOTA			0x00000010
#define F2FS_FEATURE_INODE_CHKSUM		0x00000020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x00000040
#define F2FS_FEATURE_QUOTA_INO			0x00000080
#define F2FS_FEATURE_INODE_CRTIME		0x00000100
#define F2FS_FEATURE_LOST_FOUND			0x00000200
#define F2FS_FEATURE_VERITY			0x00000400
#define F2FS_FEATURE_SB_CHKSUM			0x00000800
#define F2FS_FEATURE_CASEFOLD			0x00001000
#define F2FS_FEATURE_COMPRESSION		0x00002000
#define F2FS_FEATURE_RO				0x00004000
#define F2FS_FEATURE_DEVICE_ALIAS		0x00008000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
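
/*
 * Example (illustrative): feature masks are stored little-endian in the
 * on-disk superblock, hence the cpu_to_le32() in the test above:
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		...the image was formatted with compression support...
 */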

/*
 * Default values for user and/or group using reserved blocks
 */
#define F2FS_DEF_RESUID		0
#define F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define CP_UMOUNT	0x00000001
#define CP_FASTBOOT	0x00000002
#define CP_SYNC		0x00000004
#define CP_RECOVERY	0x00000008
#define CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	DATA_GENERIC_ENHANCE_UPDATE,	/*
					 * strong check on range and segment
					 * bitmap for update case
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	XATTR_DIR_INO,		/* for xattr updated dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct folio *folio;	/* warm node folio pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* minimum discard granularity, unit: block count */
#define MIN_DISCARD_GRANULARITY		1
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY	16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?	\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
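
/*
 * plist_idx() buckets a discard command by its length in blocks:
 * a 1-block discard maps to pend_list[0], a 16-block one to
 * pend_list[15], and anything of MAX_PLIST_NUM (512) blocks or more
 * shares the last bucket, pend_list[511].
 */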

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct discard_info di;		/* discard info */
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

enum {
	DPOLICY_IO_AWARE_DISABLE,	/* force to not be aware of IO */
	DPOLICY_IO_AWARE_ENABLE,	/* force to be aware of IO */
	DPOLICY_IO_AWARE_MAX,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum discard granularity exempt from I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	struct mutex cmd_lock;
	unsigned int nr_discards;	/* # of discards in the list */
	unsigned int max_discards;	/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_io_aware_gran;	/* minimum discard granularity exempt from I/O awareness */
	unsigned int discard_urgent_util;	/* utilization over which discard is issued proactively */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
	unsigned int discard_io_aware;		/* io_aware policy */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
	bool discard_wake;			/* to wake up discard thread */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
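
/*
 * A hedged usage sketch: before journalling a dirty NAT entry in the
 * current summary, callers check that the journal still has room and
 * only then bump the entry count, along the lines of:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL)) {
 *		int idx = update_nats_in_cursum(journal, 1);
 *		nid_in_journal(journal, idx) = cpu_to_le32(nid);
 *	}
 */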

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
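
/*
 * Worked example (assuming the common defaults: 923 block-address slots
 * in the inode, no extra attributes, 50 inline-xattr slots):
 *
 *	MAX_INLINE_DATA  = 4 * (923 - 50 - 1) = 3488 bytes
 *	NR_INLINE_DENTRY = 3488 * 8 / ((11 + 8) * 8 + 1) = 182 entries
 *
 * i.e. an inline file holds ~3.4KB of data and an inline directory about
 * 182 dentries. The exact numbers shift with extra_isize and the
 * configured inline xattr size, so treat these as illustrative.
 */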

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified. This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename. For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the original name is
	 * "." or "..", if the directory is both casefolded and encrypted and
	 * its encryption key is unavailable, or if the filesystem is doing an
	 * internal operation where usr_fname is also NULL. In all these cases
	 * we fall back to treating the name as an opaque byte sequence.
	 */
	struct qstr cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
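
/*
 * The pointer arithmetic above implies the layout of an inline directory
 * area:
 *
 *	[ dentry bitmap ][ reserved ][ dir entries ][ filename slots ]
 *
 * which mirrors struct f2fs_dentry_block, just with inline-sized counts.
 */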

/*
 * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* timeout value injected, default: 1000ms */
#define DEFAULT_FAULT_TIMEOUT	(msecs_to_jiffies(1000))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed page */
#define MAX_RETRY_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define READ_EXTENT_CACHE_SHRINK_NUMBER	128

/* number of age extent info in extent cache we try to shrink */
#define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
#define LAST_AGE_WEIGHT			30
#define SAME_AGE_REGION			1024

/*
 * Define a data block whose age is less than 1GB as hot data, and a
 * data block whose age is less than 10GB but more than 1GB as warm data.
 */
#define DEF_HOT_DATA_AGE_THRESHOLD	262144
#define DEF_WARM_DATA_AGE_THRESHOLD	2621440
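
/*
 * These thresholds are block counts: with 4KB blocks,
 * 262144 * 4KB = 1GB and 2621440 * 4KB = 10GB, matching the comment
 * above.
 */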

/* default max read extent count per inode */
#define DEF_MAX_READ_EXTENT_COUNT	10240

/* extent cache type */
enum extent_type {
	EX_READ,
	EX_BLOCK_AGE,
	NR_EXTENT_CACHES,
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	union {
		/* read extent_cache */
		struct {
			/* start block address of the extent */
			block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			/* physical extent length of compressed blocks */
			unsigned int c_len;
#endif
		};
		/* block age extent_cache */
		struct {
			/* block age of the extent */
			unsigned long long age;
			/* last total blocks allocated */
			unsigned long long last_blocks;
		};
	};
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	enum extent_type type;		/* keep the extent tree type */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
	struct extent_info largest;	/* largest cached extent for EX_READ */
};

struct extent_tree_info {
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;	/* lru list for shrinker */
	spinlock_t extent_lock;		/* locking extent lru list */
	atomic_t total_ext_tree;	/* extent tree count */
	struct list_head zombie_list;	/* extent zombie tree list */
	atomic_t total_zombie_tree;	/* extent zombie tree count */
	atomic_t total_ext_node;	/* extent info count */
};

/*
 * State of a block returned by f2fs_map_blocks.
 */
#define F2FS_MAP_NEW		(1U << 0)
#define F2FS_MAP_MAPPED		(1U << 1)
#define F2FS_MAP_DELALLOC	(1U << 2)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |	\
				F2FS_MAP_DELALLOC)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point to next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)

#define DEF_DIR_LEVEL		0

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_COW_FILE,		/* indicate COW file */
	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
	FI_ATOMIC_DIRTIED,	/* indicate atomic file is dirtied */
	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
	FI_OPENED_FILE,		/* indicate file has been opened */
	FI_DONATE_FINISHED,	/* indicate page donation of file has been finished */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	union {
		unsigned int i_current_depth;	/* only for directory depth */
		unsigned short i_gc_failures;	/* for gc failure statistic */
	};
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* The fields below are used internally by f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	unsigned int ioprio_hint;	/* hint for IO priority */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot __rcu *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */

	/* linked in global inode list for cache donation */
	struct list_head gdonate_list;
	pgoff_t donate_start, donate_end;	/* inclusive */

	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
					/* cached extent_tree entry */
	union {
		struct inode *cow_inode;	/* copy-on-write inode for atomic write */
		struct inode *atomic_inode;
					/* point to atomic_inode, available only for cow_inode */
	};

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[3];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned char i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */

	unsigned int atomic_write_cnt;
	loff_t original_i_size;		/* original i_size before atomic write */
};

static inline void get_read_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_read_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}
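
/*
 * Worked example: back = {lstart 100, len 8} and front = {lstart 108,
 * len 4} are mergeable when 8 + 4 <= max_len, because back ends exactly
 * where front begins; the merged command covers lstart 100, len 12.
 */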

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to read ahead */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for building free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct folio *inode_folio;	/* its inode folio, NULL is possible */
	struct folio *node_folio;	/* cached direct node folio */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_folio_locked;	/* inode folio is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct folio *ifolio, struct folio *nfolio, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_folio = ifolio;
	dn->node_folio = nfolio;
	dn->nid = nid;
}
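
/*
 * A hedged usage sketch: callers zero-initialize the dnode, resolve the
 * direct node covering a file offset, then release it, along the lines
 * of:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */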

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum log_type {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent logs */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem logs */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p, f)			\
	(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written,
 *			waiting for the bio's completion.
 *			... Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
#define PAGE_TYPE_ON_MAIN(type)	((type) == DATA || (type) == NODE)
enum page_type {
	DATA = 0,
	NODE = 1,	/* should not change this */
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	IPU,		/* the below types are used by tracepoints only. */
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
	CP_XATTR_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	APP_BUFFERED_CDATA_IO,		/* app buffered write IOs on compressed file */
	APP_MAPPED_CDATA_IO,		/* app mapped write IOs on compressed file */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_CDATA_IO,			/* data IOs from kworker/fsync/reclaimer on compressed file */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	APP_BUFFERED_CDATA_READ_IO,	/* app buffered read IOs on compressed file */
	APP_MAPPED_CDATA_READ_IO,	/* app mapped read IOs on compressed file */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD_IO,			/* discard */
	FS_FLUSH_IO,			/* flush */
	FS_ZONE_RESET_IO,		/* zone reset */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	enum req_op op;		/* contains REQ_OP_ */
	blk_opf_t op_flags;	/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	unsigned int compr_blocks;	/* # of compressed block addresses */
	unsigned int need_lock:8;	/* indicate we need to lock cp_rwsem */
	unsigned int version:8;		/* version of the node */
	unsigned int submitted:1;	/* indicate IO submission */
	unsigned int in_list:1;		/* indicate fio is in io_list */
	unsigned int is_por:1;		/* indicate IO is from recovery or not */
	unsigned int encrypted:1;	/* indicate file is encrypted */
	unsigned int meta_gc:1;		/* require meta inode GC */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
#ifdef CONFIG_BLK_DEV_ZONED
	struct completion zone_wait;	/* condition value for the previous open zone to close */
	struct bio *zone_pending_bio;	/* pending bio for the previous zone */
	void *bi_private;		/* previous bi_private for pending bio */
#endif
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct file *bdev_file;
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,		/* for dirty dir inode */
	FILE_INODE,		/* for dirty regular/symlink inode */
	DIRTY_META,		/* for all dirtied inode metadata */
	DONATE_INODE,		/* for all inodes to donate pages */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

struct f2fs_gc_control {
	unsigned int victim_segno;	/* target victim segment number */
	int init_gc_type;		/* FG_GC or BG_GC */
	bool no_bg_gc;			/* check the space and stop bg_gc */
	bool should_migrate_blocks;	/* should migrate blocks */
	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
	bool one_time;			/* require one-time GC in one migration unit */
	unsigned int nr_free_secs;	/* # of free sections to do GC */
};

/*
 * For s_flag in struct f2fs_sb_info
 * Modification on enum should be synchronized with s_flag array
 */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is in progress */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
	SBI_IS_SHUTDOWN,		/* shutdown by ioctl */
	SBI_IS_RECOVERED,		/* recovered orphan/data */
	SBI_CP_DISABLED,		/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,		/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,		/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,		/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,		/* quota file may be corrupted */
	SBI_IS_RESIZEFS,		/* resizefs is in process */
	SBI_IS_FREEZING,		/* freezefs is in process */
	SBI_IS_WRITABLE,		/* remove ro mountoption transiently */
	MAX_SBI_FLAG,
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

/* Note that you need to keep synchronization with this gc_mode_names array */
enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	GC_URGENT_MID,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};
1409 | |
1410 | enum { |
1411 | COMPR_MODE_FS, /* |
1412 |  * automatically compress files that |
1413 |  * have compression enabled |
1414 | */ |
1415 | COMPR_MODE_USER, /* |
1416 |  * automatic compression is disabled; |
1417 |  * the user controls file compression |
1418 |  * via ioctls |
1419 | */ |
1420 | }; |
1421 | |
1422 | enum { |
1423 | DISCARD_UNIT_BLOCK, /* basic discard unit is block */ |
1424 | DISCARD_UNIT_SEGMENT, /* basic discard unit is segment */ |
1425 | DISCARD_UNIT_SECTION, /* basic discard unit is section */ |
1426 | }; |
1427 | |
1428 | enum { |
1429 | MEMORY_MODE_NORMAL, /* memory mode for normal devices */ |
1430 | MEMORY_MODE_LOW, /* memory mode for low memory devices */ |
1431 | }; |
1432 | |
1433 | enum errors_option { |
1434 | MOUNT_ERRORS_READONLY, /* remount fs ro on errors */ |
1435 | MOUNT_ERRORS_CONTINUE, /* continue on errors */ |
1436 | MOUNT_ERRORS_PANIC, /* panic on errors */ |
1437 | }; |
1438 | |
1439 | enum { |
1440 | BACKGROUND, |
1441 | FOREGROUND, |
1442 | MAX_CALL_TYPE, |
1443 | TOTAL_CALL = FOREGROUND, |
1444 | }; |
1445 | |
1446 | static inline int f2fs_test_bit(unsigned int nr, char *addr); |
1447 | static inline void f2fs_set_bit(unsigned int nr, char *addr); |
1448 | static inline void f2fs_clear_bit(unsigned int nr, char *addr); |
1449 | |
1450 | /* |
1451 | * Layout of f2fs page.private: |
1452 | * |
1453 | * Layout A: lowest bit should be 1 |
1454 | * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... | |
1455 | * bit 0 PAGE_PRIVATE_NOT_POINTER |
1456 | * bit 1 PAGE_PRIVATE_ONGOING_MIGRATION |
1457 | * bit 2 PAGE_PRIVATE_INLINE_INODE |
1458 | * bit 3 PAGE_PRIVATE_REF_RESOURCE |
1459 | * bit 4 PAGE_PRIVATE_ATOMIC_WRITE |
1460 | * bit 5- f2fs private data |
1461 | * |
1462 | * Layout B: lowest bit should be 0 |
1463 | * page.private is a wrapped pointer. |
1464 | */ |
1465 | enum { |
1466 | PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */ |
1467 | PAGE_PRIVATE_ONGOING_MIGRATION, /* data page undergoing migration */ |
1468 | PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */ |
1469 | PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */ |
1470 | PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */ |
1471 | PAGE_PRIVATE_MAX |
1472 | }; |
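|  |
| /* |
|  * Illustration of Layout A above (a sketch only; the real helpers are the |
|  * page_private accessors further down): the payload lives above the |
|  * PAGE_PRIVATE_MAX flag bits, with bit 0 always set to mark the word as |
|  * non-pointer data. The helper name here is hypothetical. |
|  */ |
| static inline unsigned long f2fs_example_pack_private(unsigned long data) |
| { |
| 	/* e.g. data == 3 becomes (3 << PAGE_PRIVATE_MAX) | 0x1 */ |
| 	return (data << PAGE_PRIVATE_MAX) | BIT(PAGE_PRIVATE_NOT_POINTER); |
| } |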
1473 | |
1474 | /* For compression */ |
1475 | enum compress_algorithm_type { |
1476 | COMPRESS_LZO, |
1477 | COMPRESS_LZ4, |
1478 | COMPRESS_ZSTD, |
1479 | COMPRESS_LZORLE, |
1480 | COMPRESS_MAX, |
1481 | }; |
1482 | |
1483 | enum compress_flag { |
1484 | COMPRESS_CHKSUM, |
1485 | COMPRESS_MAX_FLAG, |
1486 | }; |
1487 | |
1488 | #define COMPRESS_WATERMARK 20 |
1489 | #define COMPRESS_PERCENT 20 |
1490 | |
1491 | #define COMPRESS_DATA_RESERVED_SIZE 4 |
1492 | struct compress_data { |
1493 | __le32 clen; /* compressed data size */ |
1494 | __le32 chksum; /* compressed data chksum */ |
1495 | __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */ |
1496 | u8 cdata[]; /* compressed data */ |
1497 | }; |
1498 | |
1499 | #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data)) |
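|  |
| /* |
|  * For reference: clen and chksum are 4 bytes each and reserved[] adds |
|  * 4 * 4 bytes, so COMPRESS_HEADER_SIZE evaluates to 24 bytes; cdata[] is |
|  * a flexible array member and contributes nothing to sizeof(). |
|  */ |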
1500 | |
1501 | #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000 |
1502 | |
1503 | #define F2FS_ZSTD_DEFAULT_CLEVEL 1 |
1504 | |
1505 | #define COMPRESS_LEVEL_OFFSET 8 |
1506 | |
1507 | /* compress context */ |
1508 | struct compress_ctx { |
1509 | struct inode *inode; /* inode the context belongs to */ |
1510 | pgoff_t cluster_idx; /* cluster index number */ |
1511 | unsigned int cluster_size; /* page count in cluster */ |
1512 | unsigned int log_cluster_size; /* log of cluster size */ |
1513 | struct page **rpages; /* pages store raw data in cluster */ |
1514 | unsigned int nr_rpages; /* total page number in rpages */ |
1515 | struct page **cpages; /* pages store compressed data in cluster */ |
1516 | unsigned int nr_cpages; /* total page number in cpages */ |
1517 | unsigned int valid_nr_cpages; /* valid page number in cpages */ |
1518 | void *rbuf; /* virtual mapped address on rpages */ |
1519 | struct compress_data *cbuf; /* virtual mapped address on cpages */ |
1520 | size_t rlen; /* valid data length in rbuf */ |
1521 | size_t clen; /* valid data length in cbuf */ |
1522 | void *private; /* payload buffer for specified compression algorithm */ |
1523 | void *private2; /* extra payload buffer */ |
1524 | }; |
1525 | |
1526 | /* compress context for write IO path */ |
1527 | struct compress_io_ctx { |
1528 | u32 magic; /* magic number to indicate page is compressed */ |
1529 | struct inode *inode; /* inode the context belongs to */ |
1530 | struct page **rpages; /* pages store raw data in cluster */ |
1531 | unsigned int nr_rpages; /* total page number in rpages */ |
1532 | atomic_t pending_pages; /* in-flight compressed page count */ |
1533 | }; |
1534 | |
1535 | /* Context for decompressing one cluster on the read IO path */ |
1536 | struct decompress_io_ctx { |
1537 | u32 magic; /* magic number to indicate page is compressed */ |
1538 | struct inode *inode; /* inode the context belongs to */ |
1539 | pgoff_t cluster_idx; /* cluster index number */ |
1540 | unsigned int cluster_size; /* page count in cluster */ |
1541 | unsigned int log_cluster_size; /* log of cluster size */ |
1542 | struct page **rpages; /* pages store raw data in cluster */ |
1543 | unsigned int nr_rpages; /* total page number in rpages */ |
1544 | struct page **cpages; /* pages store compressed data in cluster */ |
1545 | unsigned int nr_cpages; /* total page number in cpages */ |
1546 | struct page **tpages; /* temp pages to pad holes in cluster */ |
1547 | void *rbuf; /* virtual mapped address on rpages */ |
1548 | struct compress_data *cbuf; /* virtual mapped address on cpages */ |
1549 | size_t rlen; /* valid data length in rbuf */ |
1550 | size_t clen; /* valid data length in cbuf */ |
1551 | |
1552 | /* |
1553 | * The number of compressed pages remaining to be read in this cluster. |
1554 | * This is initially nr_cpages. It is decremented by 1 each time a page |
1555 | * has been read (or failed to be read). When it reaches 0, the cluster |
1556 | * is decompressed (or an error is reported). |
1557 | * |
1558 | * If an error occurs before all the pages have been submitted for I/O, |
1559 | * then this will never reach 0. In this case the I/O submitter is |
1560 | * responsible for calling f2fs_decompress_end_io() instead. |
1561 | */ |
1562 | atomic_t remaining_pages; |
1563 | |
1564 | /* |
1565 | * Number of references to this decompress_io_ctx. |
1566 | * |
1567 | * One reference is held for I/O completion. This reference is dropped |
1568 | * after the pagecache pages are updated and unlocked -- either after |
1569 | * decompression (and verity if enabled), or after an error. |
1570 | * |
1571 | * In addition, each compressed page holds a reference while it is in a |
1572 |  * bio. These references are necessary to prevent compressed pages from |
1573 | * being freed while they are still in a bio. |
1574 | */ |
1575 | refcount_t refcnt; |
1576 | |
1577 | bool failed; /* IO error occurred before decompression? */ |
1578 | bool need_verity; /* need fs-verity verification after decompression? */ |
1579 | void *private; /* payload buffer for specified decompression algorithm */ |
1580 | void *private2; /* extra payload buffer */ |
1581 | struct work_struct verity_work; /* work to verify the decompressed pages */ |
1582 | struct work_struct free_work; /* work to free this structure later */ |
1583 | }; |
1584 | |
1585 | #define NULL_CLUSTER ((unsigned int)(~0)) |
1586 | #define MIN_COMPRESS_LOG_SIZE 2 |
1587 | #define MAX_COMPRESS_LOG_SIZE 8 |
1588 | #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size)) |
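|  |
| /* |
|  * Worked example, assuming 4KiB pages: at the minimum log size of 2 a |
|  * cluster spans 4 pages and MAX_COMPRESS_WINDOW_SIZE(2) == 16KiB; at the |
|  * maximum log size of 8 the window grows to 4KiB << 8 == 1MiB. |
|  */ |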
1589 | |
1590 | struct f2fs_sb_info { |
1591 | struct super_block *sb; /* pointer to VFS super block */ |
1592 | struct proc_dir_entry *s_proc; /* proc entry */ |
1593 | struct f2fs_super_block *raw_super; /* raw super block pointer */ |
1594 | struct f2fs_rwsem sb_lock; /* lock for raw super block */ |
1595 | int valid_super_block; /* valid super block no */ |
1596 | unsigned long s_flag; /* flags for sbi */ |
1597 | struct mutex writepages; /* mutex for writepages() */ |
1598 | |
1599 | #ifdef CONFIG_BLK_DEV_ZONED |
1600 | unsigned int blocks_per_blkz; /* F2FS blocks per zone */ |
1601 | unsigned int max_open_zones; /* max open zone resources of the zoned device */ |
1602 | /* For adjusting the preferred write position of data on zoned UFS */ |
1603 | unsigned int blkzone_alloc_policy; |
1604 | #endif |
1605 | |
1606 | /* for node-related operations */ |
1607 | struct f2fs_nm_info *nm_info; /* node manager */ |
1608 | struct inode *node_inode; /* cache node blocks */ |
1609 | |
1610 | /* for segment-related operations */ |
1611 | struct f2fs_sm_info *sm_info; /* segment manager */ |
1612 | |
1613 | /* for bio operations */ |
1614 | struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ |
1615 | /* keep migration IO order for LFS mode */ |
1616 | struct f2fs_rwsem io_order_lock; |
1617 | pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */ |
1618 | int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */ |
1619 | |
1620 | /* for checkpoint */ |
1621 | struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ |
1622 | int cur_cp_pack; /* current cp pack (1 or 2) */ |
1623 | spinlock_t cp_lock; /* for flag in ckpt */ |
1624 | struct inode *meta_inode; /* cache meta blocks */ |
1625 | struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */ |
1626 | struct f2fs_rwsem cp_rwsem; /* blocking FS operations */ |
1627 | struct f2fs_rwsem node_write; /* locking node writes */ |
1628 | struct f2fs_rwsem node_change; /* locking node change */ |
1629 | wait_queue_head_t cp_wait; |
1630 | unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ |
1631 | long interval_time[MAX_TIME]; /* to store thresholds */ |
1632 | struct ckpt_req_control cprc_info; /* for checkpoint request control */ |
1633 | |
1634 | struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ |
1635 | |
1636 | spinlock_t fsync_node_lock; /* for node entry lock */ |
1637 | struct list_head fsync_node_list; /* node list head */ |
1638 | unsigned int fsync_seg_id; /* sequence id */ |
1639 | unsigned int fsync_node_num; /* number of node entries */ |
1640 | |
1641 | /* for orphan inode, use 0'th array */ |
1642 | unsigned int max_orphans; /* max orphan inodes */ |
1643 | |
1644 | /* for inode management */ |
1645 | struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */ |
1646 | spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ |
1647 | struct mutex flush_lock; /* for flush exclusion */ |
1648 | |
1649 | /* for extent tree cache */ |
1650 | struct extent_tree_info extent_tree[NR_EXTENT_CACHES]; |
1651 | atomic64_t allocated_data_blocks; /* for block age extent_cache */ |
1652 | unsigned int max_read_extent_count; /* max read extent count per inode */ |
1653 | |
1654 | /* The thresholds used for hot and warm data separation */ |
1655 | unsigned int hot_data_age_threshold; |
1656 | unsigned int warm_data_age_threshold; |
1657 | unsigned int last_age_weight; |
1658 | |
1659 | /* control donate caches */ |
1660 | unsigned int donate_files; |
1661 | |
1662 | /* basic filesystem units */ |
1663 | unsigned int log_sectors_per_block; /* log2 sectors per block */ |
1664 | unsigned int log_blocksize; /* log2 block size */ |
1665 | unsigned int blocksize; /* block size */ |
1666 | unsigned int root_ino_num; /* root inode number */ |
1667 | unsigned int node_ino_num; /* node inode number */ |
1668 | unsigned int meta_ino_num; /* meta inode number */ |
1669 | unsigned int log_blocks_per_seg; /* log2 blocks per segment */ |
1670 | unsigned int blocks_per_seg; /* blocks per segment */ |
1671 | unsigned int unusable_blocks_per_sec; /* unusable blocks per section */ |
1672 | unsigned int segs_per_sec; /* segments per section */ |
1673 | unsigned int secs_per_zone; /* sections per zone */ |
1674 | unsigned int total_sections; /* total section count */ |
1675 | unsigned int total_node_count; /* total node block count */ |
1676 | unsigned int total_valid_node_count; /* valid node block count */ |
1677 | int dir_level; /* directory level */ |
1678 | bool readdir_ra; /* readahead inode in readdir */ |
1679 | u64 max_io_bytes; /* max io bytes to merge IOs */ |
1680 | |
1681 | block_t user_block_count; /* # of user blocks */ |
1682 | block_t total_valid_block_count; /* # of valid blocks */ |
1683 | block_t discard_blks; /* discard command candidates */ |
1684 | block_t last_valid_block_count; /* for recovery */ |
1685 | block_t reserved_blocks; /* configurable reserved blocks */ |
1686 | block_t current_reserved_blocks; /* current reserved blocks */ |
1687 | |
1688 | /* Additional tracking for no checkpoint mode */ |
1689 | block_t unusable_block_count; /* # of blocks saved by last cp */ |
1690 | |
1691 | unsigned int nquota_files; /* # of quota sysfile */ |
1692 | struct f2fs_rwsem quota_sem; /* blocking cp for flags */ |
1693 | struct task_struct *umount_lock_holder; /* s_umount lock holder */ |
1694 | |
1695 | /* # of pages, see count_type */ |
1696 | atomic_t nr_pages[NR_COUNT_TYPE]; |
1697 | /* # of allocated blocks */ |
1698 | struct percpu_counter alloc_valid_block_count; |
1699 | /* # of node block writes as roll forward recovery */ |
1700 | struct percpu_counter rf_node_block_count; |
1701 | |
1702 | /* writeback control */ |
1703 | atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */ |
1704 | |
1705 | /* valid inode count */ |
1706 | struct percpu_counter total_valid_inode_count; |
1707 | |
1708 | struct f2fs_mount_info mount_opt; /* mount options */ |
1709 | |
1710 | /* for cleaning operations */ |
1711 | struct f2fs_rwsem gc_lock; /* |
1712 |  * semaphore for GC; avoids races between |
1713 |  * concurrent GCs, and between GC and CP |
1714 | */ |
1715 | struct f2fs_gc_kthread *gc_thread; /* GC thread */ |
1716 | struct atgc_management am; /* atgc management */ |
1717 | unsigned int cur_victim_sec; /* current victim section num */ |
1718 | unsigned int gc_mode; /* current GC state */ |
1719 | unsigned int next_victim_seg[2]; /* next segment in victim section */ |
1720 | spinlock_t gc_remaining_trials_lock; |
1721 | /* remaining trial count for GC_URGENT_* and GC_IDLE_* */ |
1722 | unsigned int gc_remaining_trials; |
1723 | |
1724 | /* for skip statistic */ |
1725 | unsigned long long skipped_gc_rwsem; /* FG_GC only */ |
1726 | |
1727 | /* threshold for gc trials on pinned files */ |
1728 | unsigned short gc_pin_file_threshold; |
1729 | struct f2fs_rwsem pin_sem; |
1730 | |
1731 | /* maximum # of trials to find a victim segment for SSR and GC */ |
1732 | unsigned int max_victim_search; |
1733 | /* migration granularity of garbage collection, unit: segment */ |
1734 | unsigned int migration_granularity; |
1735 | /* migration window granularity of garbage collection, unit: segment */ |
1736 | unsigned int migration_window_granularity; |
1737 | |
1738 | /* |
1739 | * for stat information. |
1740 | * one is for the LFS mode, and the other is for the SSR mode. |
1741 | */ |
1742 | #ifdef CONFIG_F2FS_STAT_FS |
1743 | struct f2fs_stat_info *stat_info; /* FS status information */ |
1744 | atomic_t meta_count[META_MAX]; /* # of meta blocks */ |
1745 | unsigned int segment_count[2]; /* # of allocated segments */ |
1746 | unsigned int block_count[2]; /* # of allocated blocks */ |
1747 | atomic_t inplace_count; /* # of inplace update */ |
1748 | /* # of lookup extent cache */ |
1749 | atomic64_t total_hit_ext[NR_EXTENT_CACHES]; |
1750 | /* # of hit rbtree extent node */ |
1751 | atomic64_t read_hit_rbtree[NR_EXTENT_CACHES]; |
1752 | /* # of hit cached extent node */ |
1753 | atomic64_t read_hit_cached[NR_EXTENT_CACHES]; |
1754 | /* # of hit largest extent node in read extent cache */ |
1755 | atomic64_t read_hit_largest; |
1756 | atomic_t inline_xattr; /* # of inline_xattr inodes */ |
1757 | atomic_t inline_inode; /* # of inline_data inodes */ |
1758 | atomic_t inline_dir; /* # of inline_dentry inodes */ |
1759 | atomic_t compr_inode; /* # of compressed inodes */ |
1760 | atomic64_t compr_blocks; /* # of compressed blocks */ |
1761 | atomic_t swapfile_inode; /* # of swapfile inodes */ |
1762 | atomic_t atomic_files; /* # of opened atomic file */ |
1763 | atomic_t max_aw_cnt; /* max # of atomic writes */ |
1764 | unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ |
1765 | unsigned int other_skip_bggc; /* skip background gc for other reasons */ |
1766 | unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ |
1767 | atomic_t cp_call_count[MAX_CALL_TYPE]; /* # of cp call */ |
1768 | #endif |
1769 | spinlock_t stat_lock; /* lock for stat operations */ |
1770 | |
1771 | /* to attach REQ_META|REQ_FUA flags */ |
1772 | unsigned int data_io_flag; |
1773 | unsigned int node_io_flag; |
1774 | |
1775 | /* For sysfs support */ |
1776 | struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ |
1777 | struct completion s_kobj_unregister; |
1778 | |
1779 | struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ |
1780 | struct completion s_stat_kobj_unregister; |
1781 | |
1782 | struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */ |
1783 | struct completion s_feature_list_kobj_unregister; |
1784 | |
1785 | /* For shrinker support */ |
1786 | struct list_head s_list; |
1787 | struct mutex umount_mutex; |
1788 | unsigned int shrinker_run_no; |
1789 | |
1790 | /* For multi devices */ |
1791 | int s_ndevs; /* number of devices */ |
1792 | struct f2fs_dev_info *devs; /* for device list */ |
1793 | unsigned int dirty_device; /* for checkpoint data flush */ |
1794 | spinlock_t dev_lock; /* protect dirty_device */ |
1795 | bool aligned_blksize; /* all devices have the same logical blksize */ |
1796 | unsigned int first_seq_zone_segno; /* first segno in sequential zone */ |
1797 | |
1798 | /* For write statistics */ |
1799 | u64 sectors_written_start; |
1800 | u64 kbytes_written; |
1801 | |
1802 | /* Precomputed FS UUID checksum for seeding other checksums */ |
1803 | __u32 s_chksum_seed; |
1804 | |
1805 | struct workqueue_struct *post_read_wq; /* post read workqueue */ |
1806 | |
1807 | /* |
1808 |  * If we are in irq context, defer updating the error information in |
1809 |  * the on-disk superblock to this work item. |
1810 | */ |
1811 | struct work_struct s_error_work; |
1812 | unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */ |
1813 | unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */ |
1814 | spinlock_t error_lock; /* protect errors/stop_reason array */ |
1815 | bool error_dirty; /* error info in sb is dirty */ |
1816 | |
1817 | struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ |
1818 | unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ |
1819 | |
1820 | /* For reclaimed segs statistics per each GC mode */ |
1821 | unsigned int gc_segment_mode; /* GC state for reclaimed segments */ |
1822 | unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */ |
1823 | |
1824 | unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */ |
1825 | |
1826 | int max_fragment_chunk; /* max chunk size for block fragmentation mode */ |
1827 | int max_fragment_hole; /* max hole size for block fragmentation mode */ |
1828 | |
1829 | /* For atomic write statistics */ |
1830 | atomic64_t current_atomic_write; |
1831 | s64 peak_atomic_write; |
1832 | u64 committed_atomic_block; |
1833 | u64 revoked_atomic_block; |
1834 | |
1835 | /* carve out reserved_blocks from total blocks */ |
1836 | bool carve_out; |
1837 | |
1838 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
1839 | struct kmem_cache *page_array_slab; /* page array entry */ |
1840 | unsigned int page_array_slab_size; /* default page array slab size */ |
1841 | |
1842 | /* For runtime compression statistics */ |
1843 | u64 compr_written_block; |
1844 | u64 compr_saved_block; |
1845 | u32 compr_new_inode; |
1846 | |
1847 | /* For compressed block cache */ |
1848 | struct inode *compress_inode; /* cache compressed blocks */ |
1849 | unsigned int compress_percent; /* cache page percentage */ |
1850 | unsigned int compress_watermark; /* cache page watermark */ |
1851 | atomic_t compress_page_hit; /* cache hit count */ |
1852 | #endif |
1853 | |
1854 | #ifdef CONFIG_F2FS_IOSTAT |
1855 | /* For app/fs IO statistics */ |
1856 | spinlock_t iostat_lock; |
1857 | unsigned long long iostat_count[NR_IO_TYPE]; |
1858 | unsigned long long iostat_bytes[NR_IO_TYPE]; |
1859 | unsigned long long prev_iostat_bytes[NR_IO_TYPE]; |
1860 | bool iostat_enable; |
1861 | unsigned long iostat_next_period; |
1862 | unsigned int iostat_period_ms; |
1863 | |
1864 | /* For io latency related statistics info in one iostat period */ |
1865 | spinlock_t iostat_lat_lock; |
1866 | struct iostat_lat_info *iostat_io_lat; |
1867 | #endif |
1868 | }; |
1869 | |
1870 | /* Definitions to access f2fs_sb_info */ |
1871 | #define SEGS_TO_BLKS(sbi, segs) \ |
1872 | ((segs) << (sbi)->log_blocks_per_seg) |
1873 | #define BLKS_TO_SEGS(sbi, blks) \ |
1874 | ((blks) >> (sbi)->log_blocks_per_seg) |
1875 | |
1876 | #define BLKS_PER_SEG(sbi) ((sbi)->blocks_per_seg) |
1877 | #define BLKS_PER_SEC(sbi) (SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec)) |
1878 | #define SEGS_PER_SEC(sbi) ((sbi)->segs_per_sec) |
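|  |
| /* |
|  * Example, assuming the common 2MiB segment (log_blocks_per_seg == 9 with |
|  * 4KiB blocks): SEGS_TO_BLKS(sbi, 3) == 3 << 9 == 1536 blocks, while |
|  * BLKS_TO_SEGS() truncates, so BLKS_TO_SEGS(sbi, 1535) == 2. |
|  */ |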
1879 | |
1880 | __printf(3, 4) |
1881 | void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...); |
1882 | |
1883 | #define f2fs_err(sbi, fmt, ...) \ |
1884 | f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__) |
1885 | #define f2fs_warn(sbi, fmt, ...) \ |
1886 | f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__) |
1887 | #define f2fs_notice(sbi, fmt, ...) \ |
1888 | f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__) |
1889 | #define f2fs_info(sbi, fmt, ...) \ |
1890 | f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__) |
1891 | #define f2fs_debug(sbi, fmt, ...) \ |
1892 | f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__) |
1893 | |
1894 | #define f2fs_err_ratelimited(sbi, fmt, ...) \ |
1895 | f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__) |
1896 | #define f2fs_warn_ratelimited(sbi, fmt, ...) \ |
1897 | f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__) |
1898 | #define f2fs_info_ratelimited(sbi, fmt, ...) \ |
1899 | f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__) |
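|  |
| /* |
|  * Illustrative call site: |
|  * |
|  *	f2fs_warn(sbi, "invalid blkaddr %u", blkaddr); |
|  * |
|  * expands so that KERN_WARNING is prepended to the format string itself, |
|  * which is why f2fs_printk() takes no separate log-level argument. |
|  */ |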
1900 | |
1901 | #ifdef CONFIG_F2FS_FAULT_INJECTION |
1902 | #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \ |
1903 | __builtin_return_address(0)) |
1904 | static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type, |
1905 | const char *func, const char *parent_func) |
1906 | { |
1907 | struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; |
1908 | |
1909 | if (!ffi->inject_rate) |
1910 | return false; |
1911 | |
1912 | if (!IS_FAULT_SET(ffi, type)) |
1913 | return false; |
1914 | |
1915 | atomic_inc(&ffi->inject_ops); |
1916 | if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { |
1917 | atomic_set(&ffi->inject_ops, 0); |
1918 | ffi->inject_count[type]++; |
1919 | f2fs_info_ratelimited(sbi, "inject %s in %s of %pS", |
1920 | f2fs_fault_name[type], func, parent_func); |
1921 | return true; |
1922 | } |
1923 | return false; |
1924 | } |
1925 | #else |
1926 | static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) |
1927 | { |
1928 | return false; |
1929 | } |
1930 | #endif |
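|  |
| /* |
|  * Callers use time_to_inject() as an early-out on the error path, e.g. |
|  * f2fs_trylock_op() below does: |
|  * |
|  *	if (time_to_inject(sbi, FAULT_LOCK_OP)) |
|  *		return 0; |
|  * |
|  * With CONFIG_F2FS_FAULT_INJECTION disabled this folds to constant false. |
|  */ |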
1931 | |
1932 | /* |
1933 | * Test if the mounted volume is a multi-device volume. |
1934 | * - For a single regular disk volume, sbi->s_ndevs is 0. |
1935 | * - For a single zoned disk volume, sbi->s_ndevs is 1. |
1936 | * - For a multi-device volume, sbi->s_ndevs is always 2 or more. |
1937 | */ |
1938 | static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) |
1939 | { |
1940 | return sbi->s_ndevs > 1; |
1941 | } |
1942 | |
1943 | static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) |
1944 | { |
1945 | unsigned long now = jiffies; |
1946 | |
1947 | sbi->last_time[type] = now; |
1948 | |
1949 | /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ |
1950 | if (type == REQ_TIME) { |
1951 | sbi->last_time[DISCARD_TIME] = now; |
1952 | sbi->last_time[GC_TIME] = now; |
1953 | } |
1954 | } |
1955 | |
1956 | static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) |
1957 | { |
1958 | unsigned long interval = sbi->interval_time[type] * HZ; |
1959 | |
1960 | return time_after(jiffies, sbi->last_time[type] + interval); |
1961 | } |
1962 | |
1963 | static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, |
1964 | int type) |
1965 | { |
1966 | unsigned long interval = sbi->interval_time[type] * HZ; |
1967 | unsigned int wait_ms = 0; |
1968 | long delta; |
1969 | |
1970 | delta = (sbi->last_time[type] + interval) - jiffies; |
1971 | if (delta > 0) |
1972 | wait_ms = jiffies_to_msecs(delta); |
1973 | |
1974 | return wait_ms; |
1975 | } |
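|  |
| /* |
|  * Example: with interval_time[GC_TIME] == 30 (seconds) and the last GC |
|  * 10 seconds ago, delta is about 20 * HZ and roughly 20000ms is returned; |
|  * once the deadline passes, delta is non-positive and wait_ms stays 0. |
|  */ |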
1976 | |
1977 | /* |
1978 | * Inline functions |
1979 | */ |
1980 | static inline u32 __f2fs_crc32(u32 crc, const void *address, |
1981 | unsigned int length) |
1982 | { |
1983 | return crc32(crc, address, length); |
1984 | } |
1985 | |
1986 | static inline u32 f2fs_crc32(const void *address, unsigned int length) |
1987 | { |
1988 | return __f2fs_crc32(F2FS_SUPER_MAGIC, address, length); |
1989 | } |
1990 | |
1991 | static inline u32 f2fs_chksum(u32 crc, const void *address, unsigned int length) |
1992 | { |
1993 | return __f2fs_crc32(crc, address, length); |
1994 | } |
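|  |
| /* |
|  * Sketch of how these compose (buffer names are illustrative): a metadata |
|  * checksum is typically seeded with sbi->s_chksum_seed and then chained |
|  * over successive buffers, |
|  * |
|  *	u32 crc = f2fs_chksum(sbi->s_chksum_seed, &ino, sizeof(ino)); |
|  *	crc = f2fs_chksum(crc, buf, len); |
|  * |
|  * while f2fs_crc32() always starts from F2FS_SUPER_MAGIC. |
|  */ |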
1995 | |
1996 | static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) |
1997 | { |
1998 | return container_of(inode, struct f2fs_inode_info, vfs_inode); |
1999 | } |
2000 | |
2001 | static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) |
2002 | { |
2003 | return sb->s_fs_info; |
2004 | } |
2005 | |
2006 | static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) |
2007 | { |
2008 | return F2FS_SB(inode->i_sb); |
2009 | } |
2010 | |
2011 | static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) |
2012 | { |
2013 | return F2FS_I_SB(mapping->host); |
2014 | } |
2015 | |
2016 | static inline struct f2fs_sb_info *F2FS_F_SB(struct folio *folio) |
2017 | { |
2018 | return F2FS_M_SB(folio->mapping); |
2019 | } |
2020 | |
2021 | static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) |
2022 | { |
2023 | return F2FS_F_SB(page_folio(page)); |
2024 | } |
2025 | |
2026 | static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) |
2027 | { |
2028 | return (struct f2fs_super_block *)(sbi->raw_super); |
2029 | } |
2030 | |
2031 | static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio, |
2032 | pgoff_t index) |
2033 | { |
2034 | pgoff_t idx_in_folio = index % (1 << folio_order(folio)); |
2035 | |
2036 | return (struct f2fs_super_block *) |
2037 | (page_address(folio_page(folio, idx_in_folio)) + |
2038 | F2FS_SUPER_OFFSET); |
2039 | } |
2040 | |
2041 | static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) |
2042 | { |
2043 | return (struct f2fs_checkpoint *)(sbi->ckpt); |
2044 | } |
2045 | |
2046 | static inline struct f2fs_node *F2FS_NODE(const struct page *page) |
2047 | { |
2048 | return (struct f2fs_node *)page_address(page); |
2049 | } |
2050 | |
2051 | static inline struct f2fs_inode *F2FS_INODE(struct page *page) |
2052 | { |
2053 | return &((struct f2fs_node *)page_address(page))->i; |
2054 | } |
2055 | |
2056 | static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) |
2057 | { |
2058 | return (struct f2fs_nm_info *)(sbi->nm_info); |
2059 | } |
2060 | |
2061 | static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) |
2062 | { |
2063 | return (struct f2fs_sm_info *)(sbi->sm_info); |
2064 | } |
2065 | |
2066 | static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) |
2067 | { |
2068 | return (struct sit_info *)(SM_I(sbi)->sit_info); |
2069 | } |
2070 | |
2071 | static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) |
2072 | { |
2073 | return (struct free_segmap_info *)(SM_I(sbi)->free_info); |
2074 | } |
2075 | |
2076 | static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) |
2077 | { |
2078 | return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); |
2079 | } |
2080 | |
2081 | static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) |
2082 | { |
2083 | return sbi->meta_inode->i_mapping; |
2084 | } |
2085 | |
2086 | static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) |
2087 | { |
2088 | return sbi->node_inode->i_mapping; |
2089 | } |
2090 | |
2091 | static inline bool is_meta_folio(struct folio *folio) |
2092 | { |
2093 | return folio->mapping == META_MAPPING(F2FS_F_SB(folio)); |
2094 | } |
2095 | |
2096 | static inline bool is_node_folio(struct folio *folio) |
2097 | { |
2098 | return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio)); |
2099 | } |
2100 | |
2101 | static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) |
2102 | { |
2103 | return test_bit(type, &sbi->s_flag); |
2104 | } |
2105 | |
2106 | static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) |
2107 | { |
2108 | set_bit(type, &sbi->s_flag); |
2109 | } |
2110 | |
2111 | static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) |
2112 | { |
2113 | clear_bit(type, &sbi->s_flag); |
2114 | } |
2115 | |
2116 | static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) |
2117 | { |
2118 | return le64_to_cpu(cp->checkpoint_ver); |
2119 | } |
2120 | |
2121 | static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) |
2122 | { |
2123 | if (type < F2FS_MAX_QUOTAS) |
2124 | return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); |
2125 | return 0; |
2126 | } |
2127 | |
2128 | static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) |
2129 | { |
2130 | size_t crc_offset = le32_to_cpu(cp->checksum_offset); |
2131 | return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); |
2132 | } |
2133 | |
2134 | static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) |
2135 | { |
2136 | unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); |
2137 | |
2138 | return ckpt_flags & f; |
2139 | } |
2140 | |
2141 | static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) |
2142 | { |
2143 | return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); |
2144 | } |
2145 | |
2146 | static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) |
2147 | { |
2148 | unsigned int ckpt_flags; |
2149 | |
2150 | ckpt_flags = le32_to_cpu(cp->ckpt_flags); |
2151 | ckpt_flags |= f; |
2152 | cp->ckpt_flags = cpu_to_le32(ckpt_flags); |
2153 | } |
2154 | |
2155 | static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) |
2156 | { |
2157 | unsigned long flags; |
2158 | |
2159 | spin_lock_irqsave(&sbi->cp_lock, flags); |
2160 | __set_ckpt_flags(F2FS_CKPT(sbi), f); |
2161 | spin_unlock_irqrestore(&sbi->cp_lock, flags); |
2162 | } |
2163 | |
2164 | static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) |
2165 | { |
2166 | unsigned int ckpt_flags; |
2167 | |
2168 | ckpt_flags = le32_to_cpu(cp->ckpt_flags); |
2169 | ckpt_flags &= (~f); |
2170 | cp->ckpt_flags = cpu_to_le32(ckpt_flags); |
2171 | } |
2172 | |
2173 | static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) |
2174 | { |
2175 | unsigned long flags; |
2176 | |
2177 | spin_lock_irqsave(&sbi->cp_lock, flags); |
2178 | __clear_ckpt_flags(F2FS_CKPT(sbi), f); |
2179 | spin_unlock_irqrestore(&sbi->cp_lock, flags); |
2180 | } |
2181 | |
2182 | #define init_f2fs_rwsem(sem) \ |
2183 | do { \ |
2184 | static struct lock_class_key __key; \ |
2185 | \ |
2186 | __init_f2fs_rwsem((sem), #sem, &__key); \ |
2187 | } while (0) |
2188 | |
2189 | static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem, |
2190 | const char *sem_name, struct lock_class_key *key) |
2191 | { |
2192 | __init_rwsem(&sem->internal_rwsem, sem_name, key); |
2193 | #ifdef CONFIG_F2FS_UNFAIR_RWSEM |
2194 | init_waitqueue_head(&sem->read_waiters); |
2195 | #endif |
2196 | } |
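|  |
| /* |
|  * The macro wrapper above gives every init_f2fs_rwsem() call site its own |
|  * static lock_class_key, so lockdep can distinguish, say, cp_rwsem from |
|  * node_write even though both are f2fs_rwsems. Usage is simply: |
|  * |
|  *	init_f2fs_rwsem(&sbi->cp_rwsem); |
|  */ |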
2197 | |
2198 | static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem) |
2199 | { |
2200 | return rwsem_is_locked(&sem->internal_rwsem); |
2201 | } |
2202 | |
2203 | static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) |
2204 | { |
2205 | return rwsem_is_contended(&sem->internal_rwsem); |
2206 | } |
2207 | |
2208 | static inline void f2fs_down_read(struct f2fs_rwsem *sem) |
2209 | { |
2210 | #ifdef CONFIG_F2FS_UNFAIR_RWSEM |
2211 | wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); |
2212 | #else |
2213 | down_read(&sem->internal_rwsem); |
2214 | #endif |
2215 | } |
2216 | |
2217 | static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) |
2218 | { |
2219 | return down_read_trylock(&sem->internal_rwsem); |
2220 | } |
2221 | |
2222 | static inline void f2fs_up_read(struct f2fs_rwsem *sem) |
2223 | { |
2224 | up_read(&sem->internal_rwsem); |
2225 | } |
2226 | |
2227 | static inline void f2fs_down_write(struct f2fs_rwsem *sem) |
2228 | { |
2229 | down_write(&sem->internal_rwsem); |
2230 | } |
2231 | |
2232 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
2233 | static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) |
2234 | { |
2235 | down_read_nested(&sem->internal_rwsem, subclass); |
2236 | } |
2237 | |
2238 | static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass) |
2239 | { |
2240 | down_write_nested(&sem->internal_rwsem, subclass); |
2241 | } |
2242 | #else |
2243 | #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) |
2244 | #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem) |
2245 | #endif |
2246 | |
2247 | static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) |
2248 | { |
2249 | return down_write_trylock(&sem->internal_rwsem); |
2250 | } |
2251 | |
2252 | static inline void f2fs_up_write(struct f2fs_rwsem *sem) |
2253 | { |
2254 | up_write(&sem->internal_rwsem); |
2255 | #ifdef CONFIG_F2FS_UNFAIR_RWSEM |
2256 | wake_up_all(&sem->read_waiters); |
2257 | #endif |
2258 | } |
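|  |
| /* |
|  * Note on CONFIG_F2FS_UNFAIR_RWSEM: f2fs_down_read() loops on |
|  * down_read_trylock() inside wait_event() and f2fs_up_write() wakes all |
|  * read_waiters, so readers can overtake queued writers. This trades the |
|  * rwsem's writer fairness for read throughput on hot locks. |
|  */ |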
2259 | |
2260 | static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) |
2261 | { |
2262 | unsigned long flags; |
2263 | unsigned char *nat_bits; |
2264 | |
2265 | /* |
2266 |  * Re-enabling nat_bits would require running fsck.f2fs (forced by |
2267 |  * setting SBI_NEED_FSCK), which is costly, so rely on a regular |
2268 |  * fsck or an unclean shutdown instead. |
2269 | */ |
2270 | |
2271 | if (lock) |
2272 | spin_lock_irqsave(&sbi->cp_lock, flags); |
2273 | __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); |
2274 | nat_bits = NM_I(sbi)->nat_bits; |
2275 | NM_I(sbi)->nat_bits = NULL; |
2276 | if (lock) |
2277 | spin_unlock_irqrestore(&sbi->cp_lock, flags); |
2278 |  |
2279 | kvfree(nat_bits); |
2280 | } |
2281 | |
2282 | static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, |
2283 | struct cp_control *cpc) |
2284 | { |
2285 | bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); |
2286 | |
2287 | return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set; |
2288 | } |
2289 | |
2290 | static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) |
2291 | { |
2292 | f2fs_down_read(&sbi->cp_rwsem); |
2293 | } |
2294 | |
2295 | static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) |
2296 | { |
2297 | if (time_to_inject(sbi, FAULT_LOCK_OP)) |
2298 | return 0; |
2299 | return f2fs_down_read_trylock(&sbi->cp_rwsem); |
2300 | } |
2301 | |
2302 | static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) |
2303 | { |
2304 | f2fs_up_read(&sbi->cp_rwsem); |
2305 | } |
2306 | |
2307 | static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) |
2308 | { |
2309 | f2fs_down_write(&sbi->cp_rwsem); |
2310 | } |
2311 | |
2312 | static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) |
2313 | { |
2314 | f2fs_up_write(&sbi->cp_rwsem); |
2315 | } |
2316 | |
2317 | static inline int __get_cp_reason(struct f2fs_sb_info *sbi) |
2318 | { |
2319 | int reason = CP_SYNC; |
2320 | |
2321 | if (test_opt(sbi, FASTBOOT)) |
2322 | reason = CP_FASTBOOT; |
2323 | if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) |
2324 | reason = CP_UMOUNT; |
2325 | return reason; |
2326 | } |
2327 | |
2328 | static inline bool __remain_node_summaries(int reason) |
2329 | { |
2330 | return (reason & (CP_UMOUNT | CP_FASTBOOT)); |
2331 | } |
2332 | |
2333 | static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) |
2334 | { |
2335 | return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || |
2336 | is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); |
2337 | } |
2338 | |
2339 | /* |
2340 | * Check whether the inode has blocks or not |
2341 | */ |
2342 | static inline int F2FS_HAS_BLOCKS(struct inode *inode) |
2343 | { |
2344 | block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; |
2345 | |
2346 | return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; |
2347 | } |
2348 | |
2349 | static inline bool f2fs_has_xattr_block(unsigned int ofs) |
2350 | { |
2351 | return ofs == XATTR_NODE_OFFSET; |
2352 | } |
2353 | |
2354 | static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, |
2355 | struct inode *inode, bool cap) |
2356 | { |
2357 | if (!inode) |
2358 | return true; |
2359 | if (!test_opt(sbi, RESERVE_ROOT)) |
2360 | return false; |
2361 | if (IS_NOQUOTA(inode)) |
2362 | return true; |
2363 | if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) |
2364 | return true; |
2365 | if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && |
2366 | in_group_p(F2FS_OPTION(sbi).s_resgid)) |
2367 | return true; |
2368 | if (cap && capable(CAP_SYS_RESOURCE)) |
2369 | return true; |
2370 | return false; |
2371 | } |
2372 | |
2373 | static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi, |
2374 | struct inode *inode, bool cap) |
2375 | { |
2376 | block_t avail_user_block_count; |
2377 | |
2378 | avail_user_block_count = sbi->user_block_count - |
2379 | sbi->current_reserved_blocks; |
2380 | |
2381 | if (!__allow_reserved_blocks(sbi, inode, cap)) |
2382 | avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; |
2383 | |
2384 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { |
2385 | if (avail_user_block_count > sbi->unusable_block_count) |
2386 | avail_user_block_count -= sbi->unusable_block_count; |
2387 | else |
2388 | avail_user_block_count = 0; |
2389 | } |
2390 | |
2391 | return avail_user_block_count; |
2392 | } |
2393 | |
2394 | static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); |
2395 | static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, |
2396 | struct inode *inode, blkcnt_t *count, bool partial) |
2397 | { |
2398 | long long diff = 0, release = 0; |
2399 | block_t avail_user_block_count; |
2400 | int ret; |
2401 | |
2402 | ret = dquot_reserve_block(inode, *count); |
2403 | if (ret) |
2404 | return ret; |
2405 | |
2406 | if (time_to_inject(sbi, FAULT_BLOCK)) { |
2407 | release = *count; |
2408 | goto release_quota; |
2409 | } |
2410 | |
2411 | /* |
2412 |  * let's increase this prior to the actual block count change so that |
2413 |  * f2fs_sync_file can avoid data races when deciding on a checkpoint. |
2414 | */ |
2415 | percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); |
2416 |  |
2417 | spin_lock(&sbi->stat_lock); |
2418 |  |
2419 | avail_user_block_count = get_available_block_count(sbi, inode, true); |
2420 | diff = (long long)sbi->total_valid_block_count + *count - |
2421 | avail_user_block_count; |
2422 | if (unlikely(diff > 0)) { |
2423 | if (!partial) { |
2424 | spin_unlock(&sbi->stat_lock); |
2425 | release = *count; |
2426 | goto enospc; |
2427 | } |
2428 | if (diff > *count) |
2429 | diff = *count; |
2430 | *count -= diff; |
2431 | release = diff; |
2432 | if (!*count) { |
2433 | spin_unlock(&sbi->stat_lock); |
2434 | goto enospc; |
2435 | } |
2436 | } |
2437 | sbi->total_valid_block_count += (block_t)(*count); |
2438 | |
2439 | spin_unlock(&sbi->stat_lock); |
2440 | |
2441 | if (unlikely(release)) { |
2442 | percpu_counter_sub(&sbi->alloc_valid_block_count, release); |
2443 | dquot_release_reservation_block(inode, release); |
2444 | } |
2445 | f2fs_i_blocks_write(inode, *count, true, true); |
2446 | return 0; |
2447 | |
2448 | enospc: |
2449 | percpu_counter_sub(&sbi->alloc_valid_block_count, release); |
2450 | release_quota: |
2451 | dquot_release_reservation_block(inode, release); |
2452 | return -ENOSPC; |
2453 | } |
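|  |
| /* |
|  * Illustration of the 'partial' flag above: requesting *count == 100 with |
|  * only 40 blocks available fails with -ENOSPC when partial is false; when |
|  * partial is true, *count is trimmed to 40, the 60 over-reserved quota |
|  * blocks are released, and the call succeeds (unless nothing is left). |
|  */ |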
2454 | |
2455 | #define PAGE_PRIVATE_GET_FUNC(name, flagname) \ |
2456 | static inline bool page_private_##name(struct page *page) \ |
2457 | { \ |
2458 | return PagePrivate(page) && \ |
2459 | test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \ |
2460 | test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ |
2461 | } |
2462 | |
2463 | #define PAGE_PRIVATE_SET_FUNC(name, flagname) \ |
2464 | static inline void set_page_private_##name(struct page *page) \ |
2465 | { \ |
2466 | if (!PagePrivate(page)) \ |
2467 | attach_page_private(page, (void *)0); \ |
2468 | set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \ |
2469 | set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ |
2470 | } |
2471 | |
2472 | #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \ |
2473 | static inline void clear_page_private_##name(struct page *page) \ |
2474 | { \ |
2475 | clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ |
2476 | if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \ |
2477 | detach_page_private(page); \ |
2478 | } |
2479 | |
2480 | PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); |
2481 | PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); |
2482 | PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); |
2483 | PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE); |
2484 | |
2485 | PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); |
2486 | PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); |
2487 | PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); |
2488 | PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE); |
2489 | |
2490 | PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); |
2491 | PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); |
2492 | PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); |
2493 | PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE); |
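|  |
| /* |
|  * The macro instantiations above generate helpers such as |
|  * page_private_gcing(), set_page_private_gcing() and |
|  * clear_page_private_gcing(). A typical guard in a caller: |
|  * |
|  *	if (page_private_gcing(page)) |
|  *		return;		(skip pages under migration) |
|  */ |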
2494 | |
2495 | static inline unsigned long get_page_private_data(struct page *page) |
2496 | { |
2497 | unsigned long data = page_private(page); |
2498 | |
2499 | if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data)) |
2500 | return 0; |
2501 | return data >> PAGE_PRIVATE_MAX; |
2502 | } |
2503 | |
2504 | static inline void set_page_private_data(struct page *page, unsigned long data) |
2505 | { |
2506 | if (!PagePrivate(page)) |
2507 | attach_page_private(page, (void *)0); |
2508 | set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); |
2509 | page_private(page) |= data << PAGE_PRIVATE_MAX; |
2510 | } |
2511 | |
2512 | static inline void clear_page_private_data(struct page *page) |
2513 | { |
2514 | page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0); |
2515 | if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) |
2516 | detach_page_private(page); |
2517 | } |
2518 | |
2519 | static inline void clear_page_private_all(struct page *page) |
2520 | { |
2521 | clear_page_private_data(page); |
2522 | clear_page_private_reference(page); |
2523 | clear_page_private_gcing(page); |
2524 | clear_page_private_inline(page); |
2525 | clear_page_private_atomic(page); |
2526 | |
2527 | f2fs_bug_on(F2FS_P_SB(page), page_private(page)); |
2528 | } |
2529 | |
2530 | static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, |
2531 | struct inode *inode, |
2532 | block_t count) |
2533 | { |
2534 | blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; |
2535 | |
2536 | spin_lock(&sbi->stat_lock); |
2537 | if (unlikely(sbi->total_valid_block_count < count)) { |
2538 | f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u", |
2539 | sbi->total_valid_block_count, inode->i_ino, count); |
2540 | sbi->total_valid_block_count = 0; |
2541 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
2542 | } else { |
2543 | sbi->total_valid_block_count -= count; |
2544 | } |
2545 | if (sbi->reserved_blocks && |
2546 | sbi->current_reserved_blocks < sbi->reserved_blocks) |
2547 | sbi->current_reserved_blocks = min(sbi->reserved_blocks, |
2548 | sbi->current_reserved_blocks + count); |
2549 | spin_unlock(&sbi->stat_lock); |
2550 | if (unlikely(inode->i_blocks < sectors)) { |
2551 | f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", |
2552 | inode->i_ino, |
2553 | (unsigned long long)inode->i_blocks, |
2554 | (unsigned long long)sectors); |
2555 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
2556 | return; |
2557 | } |
2558 | f2fs_i_blocks_write(inode, count, false, true); |
2559 | } |
2560 | |
2561 | static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) |
2562 | { |
2563 | atomic_inc(&sbi->nr_pages[count_type]); |
2564 | |
2565 | if (count_type == F2FS_DIRTY_DENTS || |
2566 | count_type == F2FS_DIRTY_NODES || |
2567 | count_type == F2FS_DIRTY_META || |
2568 | count_type == F2FS_DIRTY_QDATA || |
2569 | count_type == F2FS_DIRTY_IMETA) |
2570 | set_sbi_flag(sbi, SBI_IS_DIRTY); |
2571 | } |
2572 | |
2573 | static inline void inode_inc_dirty_pages(struct inode *inode) |
2574 | { |
2575 | atomic_inc(&F2FS_I(inode)->dirty_pages); |
2576 | inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? |
2577 | F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); |
2578 | if (IS_NOQUOTA(inode)) |
2579 | inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); |
2580 | } |
2581 | |
2582 | static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) |
2583 | { |
2584 | atomic_dec(&sbi->nr_pages[count_type]); |
2585 | } |
2586 | |
2587 | static inline void inode_dec_dirty_pages(struct inode *inode) |
2588 | { |
2589 | if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && |
2590 | !S_ISLNK(inode->i_mode)) |
2591 | return; |
2592 | |
2593 | atomic_dec(&F2FS_I(inode)->dirty_pages); |
2594 | dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? |
2595 | F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); |
2596 | if (IS_NOQUOTA(inode)) |
2597 | dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); |
2598 | } |
2599 | |
2600 | static inline void inc_atomic_write_cnt(struct inode *inode) |
2601 | { |
2602 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2603 | struct f2fs_inode_info *fi = F2FS_I(inode); |
2604 | u64 current_write; |
2605 | |
2606 | fi->atomic_write_cnt++; |
2607 | atomic64_inc(&sbi->current_atomic_write); |
2608 | current_write = atomic64_read(&sbi->current_atomic_write); |
2609 | if (current_write > sbi->peak_atomic_write) |
2610 | sbi->peak_atomic_write = current_write; |
2611 | } |
2612 | |
2613 | static inline void release_atomic_write_cnt(struct inode *inode) |
2614 | { |
2615 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2616 | struct f2fs_inode_info *fi = F2FS_I(inode); |
2617 | |
2618 | atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write); |
2619 | fi->atomic_write_cnt = 0; |
2620 | } |
2621 | |
2622 | static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) |
2623 | { |
2624 | return atomic_read(&sbi->nr_pages[count_type]); |
2625 | } |
2626 | |
2627 | static inline int get_dirty_pages(struct inode *inode) |
2628 | { |
2629 | return atomic_read(&F2FS_I(inode)->dirty_pages); |
2630 | } |
2631 | |
2632 | static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) |
2633 | { |
2634 | return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1, |
2635 | BLKS_PER_SEC(sbi)); |
2636 | } |
2637 | |
2638 | static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) |
2639 | { |
2640 | return sbi->total_valid_block_count; |
2641 | } |
2642 | |
2643 | static inline block_t discard_blocks(struct f2fs_sb_info *sbi) |
2644 | { |
2645 | return sbi->discard_blks; |
2646 | } |
2647 | |
2648 | static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) |
2649 | { |
2650 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); |
2651 | |
2652 | /* return NAT or SIT bitmap */ |
2653 | if (flag == NAT_BITMAP) |
2654 | return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); |
2655 | else if (flag == SIT_BITMAP) |
2656 | return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); |
2657 | |
2658 | return 0; |
2659 | } |
2660 | |
2661 | static inline block_t __cp_payload(struct f2fs_sb_info *sbi) |
2662 | { |
2663 | return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); |
2664 | } |
2665 | |
2666 | static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) |
2667 | { |
2668 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); |
2669 | void *tmp_ptr = &ckpt->sit_nat_version_bitmap; |
2670 | int offset; |
2671 | |
2672 | if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { |
2673 | offset = (flag == SIT_BITMAP) ? |
2674 | le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; |
2675 | /* |
2676 |  * if the large_nat_bitmap feature is enabled, the nat/sit bitmaps |
2677 |  * are protected by a leading checksum; skip over it here. |
2678 | */ |
2679 | return tmp_ptr + offset + sizeof(__le32); |
2680 | } |
2681 | |
2682 | if (__cp_payload(sbi) > 0) { |
2683 | if (flag == NAT_BITMAP) |
2684 | return tmp_ptr; |
2685 | else |
2686 | return (unsigned char *)ckpt + F2FS_BLKSIZE; |
2687 | } else { |
2688 | offset = (flag == NAT_BITMAP) ? |
2689 | le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; |
2690 | return tmp_ptr + offset; |
2691 | } |
2692 | } |
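|  |
| /* |
|  * Layout recap for __bitmap_ptr(): with large_nat_bitmap, the NAT bitmap |
|  * comes first, the SIT bitmap follows at nat_ver_bitmap_bytesize, and the |
|  * + sizeof(__le32) skips the checksum in front of them; with a non-zero |
|  * cp_payload, the SIT bitmap is pushed out of the checkpoint block |
|  * entirely, to ckpt + F2FS_BLKSIZE. |
|  */ |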
2693 | |
2694 | static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) |
2695 | { |
2696 | block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); |
2697 | |
2698 | if (sbi->cur_cp_pack == 2) |
2699 | start_addr += BLKS_PER_SEG(sbi); |
2700 | return start_addr; |
2701 | } |
2702 | |
2703 | static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) |
2704 | { |
2705 | block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); |
2706 | |
2707 | if (sbi->cur_cp_pack == 1) |
2708 | start_addr += BLKS_PER_SEG(sbi); |
2709 | return start_addr; |
2710 | } |
2711 | |
2712 | static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) |
2713 | { |
2714 | sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; |
2715 | } |
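|  |
| /* |
|  * Worked example of the pack ping-pong above: packs #1 and #2 occupy |
|  * consecutive segments starting at cp_blkaddr. With cur_cp_pack == 2, |
|  * __start_cp_addr() returns cp_blkaddr + BLKS_PER_SEG(sbi) (the live |
|  * pack) and __start_cp_next_addr() returns cp_blkaddr (where the next |
|  * checkpoint will be written). |
|  */ |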
2716 | |
2717 | static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) |
2718 | { |
2719 | return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); |
2720 | } |
2721 | |
2722 | extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); |
2723 | static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, |
2724 | struct inode *inode, bool is_inode) |
2725 | { |
2726 | block_t valid_block_count; |
2727 | unsigned int valid_node_count; |
2728 | unsigned int avail_user_block_count; |
2729 | int err; |
2730 | |
2731 | if (is_inode) { |
2732 | if (inode) { |
2733 | err = dquot_alloc_inode(inode); |
2734 | if (err) |
2735 | return err; |
2736 | } |
2737 | } else { |
2738 | err = dquot_reserve_block(inode, 1); |
2739 | if (err) |
2740 | return err; |
2741 | } |
2742 | |
2743 | if (time_to_inject(sbi, FAULT_BLOCK)) |
2744 | goto enospc; |
2745 | |
2746 | spin_lock(&sbi->stat_lock); |
2747 | |
2748 | valid_block_count = sbi->total_valid_block_count + 1; |
2749 | avail_user_block_count = get_available_block_count(sbi, inode, false); |
2750 | |
2751 | if (unlikely(valid_block_count > avail_user_block_count)) { |
2752 | spin_unlock(&sbi->stat_lock); |
2753 | goto enospc; |
2754 | } |
2755 | |
2756 | valid_node_count = sbi->total_valid_node_count + 1; |
2757 | if (unlikely(valid_node_count > sbi->total_node_count)) { |
2758 | spin_unlock(&sbi->stat_lock); |
2759 | goto enospc; |
2760 | } |
2761 | |
2762 | sbi->total_valid_node_count++; |
2763 | sbi->total_valid_block_count++; |
2764 | spin_unlock(&sbi->stat_lock); |
2765 | |
2766 | if (inode) { |
2767 | if (is_inode) |
2768 | f2fs_mark_inode_dirty_sync(inode, true); |
2769 | else |
2770 | f2fs_i_blocks_write(inode, 1, true, true); |
2771 | } |
2772 | |
2773 | percpu_counter_inc(&sbi->alloc_valid_block_count); |
2774 | return 0; |
2775 | |
2776 | enospc: |
2777 | if (is_inode) { |
2778 | if (inode) |
2779 | dquot_free_inode(inode); |
2780 | } else { |
2781 | dquot_release_reservation_block(inode, 1); |
2782 | } |
2783 | return -ENOSPC; |
2784 | } |
2785 | |
2786 | static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, |
2787 | struct inode *inode, bool is_inode) |
2788 | { |
2789 | spin_lock(&sbi->stat_lock); |
2790 | |
2791 | if (unlikely(!sbi->total_valid_block_count || |
2792 | !sbi->total_valid_node_count)) { |
2793 | f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u", |
2794 | sbi->total_valid_block_count, |
2795 | sbi->total_valid_node_count); |
2796 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
2797 | } else { |
2798 | sbi->total_valid_block_count--; |
2799 | sbi->total_valid_node_count--; |
2800 | } |
2801 | |
2802 | if (sbi->reserved_blocks && |
2803 | sbi->current_reserved_blocks < sbi->reserved_blocks) |
2804 | sbi->current_reserved_blocks++; |
2805 | |
2806 | spin_unlock(&sbi->stat_lock); |
2807 | |
2808 | if (is_inode) { |
2809 | dquot_free_inode(inode); |
2810 | } else { |
2811 | if (unlikely(inode->i_blocks == 0)) { |
2812 | f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", |
2813 | inode->i_ino, |
2814 | (unsigned long long)inode->i_blocks); |
2815 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
2816 | return; |
2817 | } |
2818 | f2fs_i_blocks_write(inode, 1, false, true); |
2819 | } |
2820 | } |
2821 | |
2822 | static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) |
2823 | { |
2824 | return sbi->total_valid_node_count; |
2825 | } |
2826 | |
2827 | static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) |
2828 | { |
2829 | percpu_counter_inc(&sbi->total_valid_inode_count); |
2830 | } |
2831 | |
2832 | static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) |
2833 | { |
2834 | percpu_counter_dec(&sbi->total_valid_inode_count); |
2835 | } |
2836 | |
2837 | static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) |
2838 | { |
2839 | return percpu_counter_sum_positive(&sbi->total_valid_inode_count); |
2840 | } |
2841 | |
2842 | static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping, |
2843 | pgoff_t index, bool for_write) |
2844 | { |
2845 | struct folio *folio; |
2846 | unsigned int flags; |
2847 | |
2848 | if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { |
2849 | fgf_t fgf_flags; |
2850 | |
2851 | if (!for_write) |
2852 | fgf_flags = FGP_LOCK | FGP_ACCESSED; |
2853 | else |
2854 | fgf_flags = FGP_LOCK; |
2855 | folio = __filemap_get_folio(mapping, index, fgf_flags, 0); |
2856 | if (!IS_ERR(folio)) |
2857 | return folio; |
2858 |  |
2859 | if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) |
2860 | return ERR_PTR(-ENOMEM); |
2861 | } |
2862 | |
2863 | if (!for_write) |
2864 | return filemap_grab_folio(mapping, index); |
2865 | |
2866 | flags = memalloc_nofs_save(); |
2867 | folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, |
2868 | mapping_gfp_mask(mapping)); |
2869 | memalloc_nofs_restore(flags); |
2870 | |
2871 | return folio; |
2872 | } |
2873 | |
2874 | static inline struct folio *f2fs_filemap_get_folio( |
2875 | struct address_space *mapping, pgoff_t index, |
2876 | fgf_t fgp_flags, gfp_t gfp_mask) |
2877 | { |
2878 | if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) |
2879 | return ERR_PTR(-ENOMEM);
2880 |
2881 | return __filemap_get_folio(mapping, index, fgp_flags, gfp_mask);
2882 | } |
2883 | |
2884 | static inline struct page *f2fs_pagecache_get_page( |
2885 | struct address_space *mapping, pgoff_t index, |
2886 | fgf_t fgp_flags, gfp_t gfp_mask) |
2887 | { |
2888 | if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) |
2889 | return NULL; |
2890 | |
2891 | return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2892 | } |
2893 | |
2894 | static inline void f2fs_folio_put(struct folio *folio, bool unlock) |
2895 | { |
2896 | if (IS_ERR_OR_NULL(folio))
2897 | return; |
2898 | |
2899 | if (unlock) { |
2900 | f2fs_bug_on(F2FS_F_SB(folio), !folio_test_locked(folio)); |
2901 | folio_unlock(folio); |
2902 | } |
2903 | folio_put(folio); |
2904 | } |
2905 | |
2906 | static inline void f2fs_put_page(struct page *page, int unlock) |
2907 | { |
2908 | if (!page) |
2909 | return; |
2910 | f2fs_folio_put(page_folio(page), unlock); |
2911 | } |
2912 | |
2913 | static inline void f2fs_put_dnode(struct dnode_of_data *dn) |
2914 | { |
2915 | if (dn->node_folio) |
2916 | f2fs_folio_put(dn->node_folio, true);
2917 | if (dn->inode_folio && dn->node_folio != dn->inode_folio)
2918 | f2fs_folio_put(dn->inode_folio, false);
2919 | dn->node_folio = NULL; |
2920 | dn->inode_folio = NULL; |
2921 | } |
2922 | |
2923 | static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, |
2924 | size_t size) |
2925 | { |
2926 | return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); |
2927 | } |
2928 | |
2929 | static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep, |
2930 | gfp_t flags) |
2931 | { |
2932 | void *entry; |
2933 | |
2934 | entry = kmem_cache_alloc(cachep, flags); |
2935 | if (!entry) |
2936 | entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); |
2937 | return entry; |
2938 | } |
2939 | |
2940 | static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, |
2941 | gfp_t flags, bool nofail, struct f2fs_sb_info *sbi) |
2942 | { |
2943 | if (nofail) |
2944 | return f2fs_kmem_cache_alloc_nofail(cachep, flags); |
2945 | |
2946 | if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) |
2947 | return NULL; |
2948 | |
2949 | return kmem_cache_alloc(cachep, flags); |
2950 | } |
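/*
 * Illustrative pairing of the two helpers above (added note): callers on
 * must-not-fail paths pass nofail = true and may rely on a non-NULL
 * return, while ordinary callers must handle NULL, which fault injection
 * (FAULT_SLAB_ALLOC) can also force:
 *
 *	e = f2fs_kmem_cache_alloc(cache, GFP_NOFS, true, sbi);	// never NULL
 *	e = f2fs_kmem_cache_alloc(cache, GFP_NOFS, false, sbi);	// may be NULL
 */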
2951 | |
2952 | static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) |
2953 | { |
2954 | if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2955 | get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2956 | get_pages(sbi, F2FS_WB_CP_DATA) ||
2957 | get_pages(sbi, F2FS_DIO_READ) ||
2958 | get_pages(sbi, F2FS_DIO_WRITE))
2959 | return true; |
2960 | |
2961 | if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && |
2962 | atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2963 | return true; |
2964 | |
2965 | if (SM_I(sbi) && SM_I(sbi)->fcc_info && |
2966 | atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2967 | return true; |
2968 | return false; |
2969 | } |
2970 | |
2971 | static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi) |
2972 | { |
2973 | return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
2974 | } |
2975 | |
2976 | static inline bool is_idle(struct f2fs_sb_info *sbi, int type) |
2977 | { |
2978 | bool zoned_gc = (type == GC_TIME && |
2979 | F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED)); |
2980 | |
2981 | if (sbi->gc_mode == GC_URGENT_HIGH) |
2982 | return true; |
2983 | |
2984 | if (zoned_gc) { |
2985 | if (is_inflight_read_io(sbi)) |
2986 | return false; |
2987 | } else { |
2988 | if (is_inflight_io(sbi, type)) |
2989 | return false; |
2990 | } |
2991 | |
2992 | if (sbi->gc_mode == GC_URGENT_MID) |
2993 | return true; |
2994 | |
2995 | if (sbi->gc_mode == GC_URGENT_LOW && |
2996 | (type == DISCARD_TIME || type == GC_TIME)) |
2997 | return true; |
2998 | |
2999 | if (zoned_gc) |
3000 | return true; |
3001 | |
3002 | return f2fs_time_over(sbi, type); |
3003 | } |
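/*
 * Summary of the precedence above (descriptive note, not original text):
 * GC_URGENT_HIGH treats the device as always idle; otherwise any
 * in-flight I/O (reads only, for GC on zoned devices) vetoes idleness;
 * GC_URGENT_MID, and GC_URGENT_LOW for discard/GC requests, then force
 * idleness; everything else falls back to the per-type interval check
 * in f2fs_time_over().
 */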
3004 | |
3005 | static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, |
3006 | unsigned long index, void *item) |
3007 | { |
3008 | while (radix_tree_insert(root, index, item)) |
3009 | cond_resched(); |
3010 | } |
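/*
 * Note (assumption, added for clarity): radix_tree_insert() fails with
 * -ENOMEM under memory pressure, which this loop rides out by yielding
 * and retrying.  It would spin forever on -EEXIST, so callers must
 * guarantee the index is not already present in the tree.
 */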
3011 | |
3012 | #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) |
3013 | |
3014 | static inline bool IS_INODE(struct page *page) |
3015 | { |
3016 | struct f2fs_node *p = F2FS_NODE(page); |
3017 | |
3018 | return RAW_IS_INODE(p); |
3019 | } |
3020 | |
3021 | static inline int offset_in_addr(struct f2fs_inode *i) |
3022 | { |
3023 | return (i->i_inline & F2FS_EXTRA_ATTR) ? |
3024 | (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; |
3025 | } |
3026 | |
3027 | static inline __le32 *blkaddr_in_node(struct f2fs_node *node) |
3028 | { |
3029 | return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; |
3030 | } |
3031 | |
3032 | static inline int f2fs_has_extra_attr(struct inode *inode); |
3033 | static inline unsigned int get_dnode_base(struct inode *inode, |
3034 | struct page *node_page) |
3035 | { |
3036 | if (!IS_INODE(node_page))
3037 | return 0; |
3038 | |
3039 | return inode ? get_extra_isize(inode) : |
3040 | offset_in_addr(&F2FS_NODE(node_page)->i);
3041 | } |
3042 | |
3043 | static inline __le32 *get_dnode_addr(struct inode *inode, |
3044 | struct folio *node_folio) |
3045 | { |
3046 | return blkaddr_in_node(F2FS_NODE(&node_folio->page)) +
3047 | get_dnode_base(inode, &node_folio->page);
3048 | } |
3049 | |
3050 | static inline block_t data_blkaddr(struct inode *inode, |
3051 | struct folio *node_folio, unsigned int offset) |
3052 | { |
3053 | return le32_to_cpu(*(get_dnode_addr(inode, node_folio) + offset)); |
3054 | } |
3055 | |
3056 | static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) |
3057 | { |
3058 | return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node);
3059 | } |
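/*
 * Worked example (illustrative, using a hypothetical i_extra_isize of
 * 36 bytes): get_dnode_base() yields 36 / sizeof(__le32) = 9 for an
 * inode with the extra-attr area enabled, so get_dnode_addr() points at
 * i_addr[9] and f2fs_data_blkaddr() returns
 * le32_to_cpu(i_addr[9 + dn->ofs_in_node]).  For a direct node block
 * the base is 0 and the array is dn.addr[] instead.
 */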
3060 | |
3061 | static inline int f2fs_test_bit(unsigned int nr, char *addr) |
3062 | { |
3063 | int mask; |
3064 | |
3065 | addr += (nr >> 3); |
3066 | mask = BIT(7 - (nr & 0x07)); |
3067 | return mask & *addr; |
3068 | } |
3069 | |
3070 | static inline void f2fs_set_bit(unsigned int nr, char *addr) |
3071 | { |
3072 | int mask; |
3073 | |
3074 | addr += (nr >> 3); |
3075 | mask = BIT(7 - (nr & 0x07)); |
3076 | *addr |= mask; |
3077 | } |
3078 | |
3079 | static inline void f2fs_clear_bit(unsigned int nr, char *addr) |
3080 | { |
3081 | int mask; |
3082 | |
3083 | addr += (nr >> 3); |
3084 | mask = BIT(7 - (nr & 0x07)); |
3085 | *addr &= ~mask; |
3086 | } |
3087 | |
3088 | static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) |
3089 | { |
3090 | int mask; |
3091 | int ret; |
3092 | |
3093 | addr += (nr >> 3); |
3094 | mask = BIT(7 - (nr & 0x07)); |
3095 | ret = mask & *addr; |
3096 | *addr |= mask; |
3097 | return ret; |
3098 | } |
3099 | |
3100 | static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) |
3101 | { |
3102 | int mask; |
3103 | int ret; |
3104 | |
3105 | addr += (nr >> 3); |
3106 | mask = BIT(7 - (nr & 0x07)); |
3107 | ret = mask & *addr; |
3108 | *addr &= ~mask; |
3109 | return ret; |
3110 | } |
3111 | |
3112 | static inline void f2fs_change_bit(unsigned int nr, char *addr) |
3113 | { |
3114 | int mask; |
3115 | |
3116 | addr += (nr >> 3); |
3117 | mask = BIT(7 - (nr & 0x07)); |
3118 | *addr ^= mask; |
3119 | } |
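/*
 * Worked example (illustrative): unlike the generic set_bit()/test_bit()
 * helpers, the functions above operate on on-disk bitmaps, which number
 * bits MSB-first within each byte.  For nr = 10:
 *
 *	addr += 10 >> 3;		// second byte, addr[1]
 *	mask  = BIT(7 - (10 & 0x07));	// BIT(5) == 0x20
 *
 * so f2fs_set_bit(10, addr) sets bit 5 (counted from the LSB) of byte 1.
 */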
3120 | |
3121 | /* |
3122 | * On-disk inode flags (f2fs_inode::i_flags) |
3123 | */ |
3124 | #define F2FS_COMPR_FL 0x00000004 /* Compress file */ |
3125 | #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ |
3126 | #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ |
3127 | #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ |
3128 | #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ |
3129 | #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ |
3130 | #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ |
3131 | #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ |
3132 | #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ |
3133 | #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ |
3134 | #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ |
3135 | #define F2FS_DEVICE_ALIAS_FL 0x80000000 /* File for aliasing a device */ |
3136 | |
3137 | #define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL) |
3138 | |
3139 | /* Flags that should be inherited by new inodes from their parent. */ |
3140 | #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ |
3141 | F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ |
3142 | F2FS_CASEFOLD_FL) |
3143 | |
3144 | /* Flags that are appropriate for regular files (all but dir-specific ones). */ |
3145 | #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ |
3146 | F2FS_CASEFOLD_FL)) |
3147 | |
3148 | /* Flags that are appropriate for non-directories/regular files. */ |
3149 | #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) |
3150 | |
3151 | #define IS_DEVICE_ALIASING(inode) (F2FS_I(inode)->i_flags & F2FS_DEVICE_ALIAS_FL) |
3152 | |
3153 | static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) |
3154 | { |
3155 | if (S_ISDIR(mode)) |
3156 | return flags; |
3157 | else if (S_ISREG(mode)) |
3158 | return flags & F2FS_REG_FLMASK; |
3159 | else |
3160 | return flags & F2FS_OTHER_FLMASK; |
3161 | } |
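/*
 * Example (illustrative): a regular file inheriting flags from a
 * casefolded parent directory loses the directory-only bits, so
 *
 *	f2fs_mask_flags(S_IFREG, F2FS_CASEFOLD_FL | F2FS_NOATIME_FL)
 *
 * evaluates to F2FS_NOATIME_FL, while for a symlink only
 * F2FS_NODUMP_FL and F2FS_NOATIME_FL can survive the mask.
 */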
3162 | |
3163 | static inline void __mark_inode_dirty_flag(struct inode *inode, |
3164 | int flag, bool set) |
3165 | { |
3166 | switch (flag) { |
3167 | case FI_INLINE_XATTR: |
3168 | case FI_INLINE_DATA: |
3169 | case FI_INLINE_DENTRY: |
3170 | case FI_NEW_INODE: |
3171 | if (set) |
3172 | return; |
3173 | fallthrough; |
3174 | case FI_DATA_EXIST: |
3175 | case FI_PIN_FILE: |
3176 | case FI_COMPRESS_RELEASED: |
3177 | f2fs_mark_inode_dirty_sync(inode, true);
3178 | } |
3179 | } |
3180 | |
3181 | static inline void set_inode_flag(struct inode *inode, int flag) |
3182 | { |
3183 | set_bit(flag, F2FS_I(inode)->flags);
3184 | __mark_inode_dirty_flag(inode, flag, true);
3185 | } |
3186 | |
3187 | static inline int is_inode_flag_set(struct inode *inode, int flag) |
3188 | { |
3189 | return test_bit(flag, F2FS_I(inode)->flags); |
3190 | } |
3191 | |
3192 | static inline void clear_inode_flag(struct inode *inode, int flag) |
3193 | { |
3194 | clear_bit(flag, F2FS_I(inode)->flags);
3195 | __mark_inode_dirty_flag(inode, flag, false);
3196 | } |
3197 | |
3198 | static inline bool f2fs_verity_in_progress(struct inode *inode) |
3199 | { |
3200 | return IS_ENABLED(CONFIG_FS_VERITY) && |
3201 | is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
3202 | } |
3203 | |
3204 | static inline void set_acl_inode(struct inode *inode, umode_t mode) |
3205 | { |
3206 | F2FS_I(inode)->i_acl_mode = mode; |
3207 | set_inode_flag(inode, FI_ACL_MODE);
3208 | f2fs_mark_inode_dirty_sync(inode, false);
3209 | } |
3210 | |
3211 | static inline void f2fs_i_links_write(struct inode *inode, bool inc) |
3212 | { |
3213 | if (inc) |
3214 | inc_nlink(inode); |
3215 | else |
3216 | drop_nlink(inode); |
3217 | f2fs_mark_inode_dirty_sync(inode, true);
3218 | } |
3219 | |
3220 | static inline void f2fs_i_blocks_write(struct inode *inode, |
3221 | block_t diff, bool add, bool claim) |
3222 | { |
3223 | bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3224 | bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3225 | |
3226 | /* add = 1, claim = 1: must be paired with a prior dquot_reserve_block() */
3227 | if (add) { |
3228 | if (claim) |
3229 | dquot_claim_block(inode, diff);
3230 | else
3231 | dquot_alloc_block_nofail(inode, diff);
3232 | } else {
3233 | dquot_free_block(inode, diff);
3234 | }
3235 |
3236 | f2fs_mark_inode_dirty_sync(inode, true);
3237 | if (clean || recover)
3238 | set_inode_flag(inode, FI_AUTO_RECOVER);
3239 | } |
3240 | |
3241 | static inline bool f2fs_is_atomic_file(struct inode *inode); |
3242 | |
3243 | static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) |
3244 | { |
3245 | bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3246 | bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3247 | |
3248 | if (i_size_read(inode) == i_size) |
3249 | return; |
3250 | |
3251 | i_size_write(inode, i_size); |
3252 | |
3253 | if (f2fs_is_atomic_file(inode)) |
3254 | return; |
3255 | |
3256 | f2fs_mark_inode_dirty_sync(inode, true);
3257 | if (clean || recover)
3258 | set_inode_flag(inode, FI_AUTO_RECOVER);
3259 | } |
3260 | |
3261 | static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) |
3262 | { |
3263 | F2FS_I(inode)->i_current_depth = depth; |
3264 | f2fs_mark_inode_dirty_sync(inode, true);
3265 | } |
3266 | |
3267 | static inline void f2fs_i_gc_failures_write(struct inode *inode, |
3268 | unsigned int count) |
3269 | { |
3270 | F2FS_I(inode)->i_gc_failures = count; |
3271 | f2fs_mark_inode_dirty_sync(inode, true);
3272 | } |
3273 | |
3274 | static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) |
3275 | { |
3276 | F2FS_I(inode)->i_xattr_nid = xnid; |
3277 | f2fs_mark_inode_dirty_sync(inode, true);
3278 | } |
3279 | |
3280 | static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) |
3281 | { |
3282 | F2FS_I(inode)->i_pino = pino; |
3283 | f2fs_mark_inode_dirty_sync(inode, true);
3284 | } |
3285 | |
3286 | static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) |
3287 | { |
3288 | struct f2fs_inode_info *fi = F2FS_I(inode); |
3289 | |
3290 | if (ri->i_inline & F2FS_INLINE_XATTR) |
3291 | set_bit(FI_INLINE_XATTR, fi->flags);
3292 | if (ri->i_inline & F2FS_INLINE_DATA)
3293 | set_bit(FI_INLINE_DATA, fi->flags);
3294 | if (ri->i_inline & F2FS_INLINE_DENTRY)
3295 | set_bit(FI_INLINE_DENTRY, fi->flags);
3296 | if (ri->i_inline & F2FS_DATA_EXIST)
3297 | set_bit(FI_DATA_EXIST, fi->flags);
3298 | if (ri->i_inline & F2FS_EXTRA_ATTR)
3299 | set_bit(FI_EXTRA_ATTR, fi->flags);
3300 | if (ri->i_inline & F2FS_PIN_FILE)
3301 | set_bit(FI_PIN_FILE, fi->flags);
3302 | if (ri->i_inline & F2FS_COMPRESS_RELEASED)
3303 | set_bit(FI_COMPRESS_RELEASED, fi->flags);
3304 | } |
3305 | |
3306 | static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) |
3307 | { |
3308 | ri->i_inline = 0; |
3309 | |
3310 | if (is_inode_flag_set(inode, FI_INLINE_XATTR))
3311 | ri->i_inline |= F2FS_INLINE_XATTR;
3312 | if (is_inode_flag_set(inode, FI_INLINE_DATA))
3313 | ri->i_inline |= F2FS_INLINE_DATA;
3314 | if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
3315 | ri->i_inline |= F2FS_INLINE_DENTRY;
3316 | if (is_inode_flag_set(inode, FI_DATA_EXIST))
3317 | ri->i_inline |= F2FS_DATA_EXIST;
3318 | if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
3319 | ri->i_inline |= F2FS_EXTRA_ATTR;
3320 | if (is_inode_flag_set(inode, FI_PIN_FILE))
3321 | ri->i_inline |= F2FS_PIN_FILE;
3322 | if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
3323 | ri->i_inline |= F2FS_COMPRESS_RELEASED; |
3324 | } |
3325 | |
3326 | static inline int f2fs_has_extra_attr(struct inode *inode) |
3327 | { |
3328 | return is_inode_flag_set(inode, FI_EXTRA_ATTR);
3329 | } |
3330 | |
3331 | static inline int f2fs_has_inline_xattr(struct inode *inode) |
3332 | { |
3333 | return is_inode_flag_set(inode, FI_INLINE_XATTR);
3334 | } |
3335 | |
3336 | static inline int f2fs_compressed_file(struct inode *inode) |
3337 | { |
3338 | return S_ISREG(inode->i_mode) && |
3339 | is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3340 | } |
3341 | |
3342 | static inline bool f2fs_need_compress_data(struct inode *inode) |
3343 | { |
3344 | int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; |
3345 | |
3346 | if (!f2fs_compressed_file(inode)) |
3347 | return false; |
3348 | |
3349 | if (compress_mode == COMPR_MODE_FS) |
3350 | return true; |
3351 | else if (compress_mode == COMPR_MODE_USER && |
3352 | is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3353 | return true; |
3354 | |
3355 | return false; |
3356 | } |
3357 | |
3358 | static inline unsigned int addrs_per_page(struct inode *inode, |
3359 | bool is_inode) |
3360 | { |
3361 | unsigned int addrs = is_inode ? (CUR_ADDRS_PER_INODE(inode) - |
3362 | get_inline_xattr_addrs(inode)) : DEF_ADDRS_PER_BLOCK; |
3363 | |
3364 | if (f2fs_compressed_file(inode)) |
3365 | return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); |
3366 | return addrs; |
3367 | } |
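/*
 * Worked example (illustrative; 1018 is the usual DEF_ADDRS_PER_BLOCK
 * for 4KiB blocks, and a cluster size of 4 is assumed): for a compressed
 * file the count is rounded down so a cluster never straddles two node
 * blocks, i.e. ALIGN_DOWN(1018, 4) == 1016 usable block addresses.
 */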
3368 | |
3369 | static inline void *inline_xattr_addr(struct inode *inode, struct folio *folio) |
3370 | { |
3371 | struct f2fs_inode *ri = F2FS_INODE(&folio->page);
3372 | |
3373 | return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - |
3374 | get_inline_xattr_addrs(inode)]); |
3375 | } |
3376 | |
3377 | static inline int inline_xattr_size(struct inode *inode) |
3378 | { |
3379 | if (f2fs_has_inline_xattr(inode)) |
3380 | return get_inline_xattr_addrs(inode) * sizeof(__le32); |
3381 | return 0; |
3382 | } |
3383 | |
3384 | /* |
3385 | * Note: checking the inline_data flag without holding the inode page lock
3386 | * is unsafe; f2fs_convert_inline_folio() can clear it at any time.
3387 | */ |
3388 | static inline int f2fs_has_inline_data(struct inode *inode) |
3389 | { |
3390 | return is_inode_flag_set(inode, FI_INLINE_DATA);
3391 | } |
3392 | |
3393 | static inline int f2fs_exist_data(struct inode *inode) |
3394 | { |
3395 | return is_inode_flag_set(inode, FI_DATA_EXIST);
3396 | } |
3397 | |
3398 | static inline int f2fs_is_mmap_file(struct inode *inode) |
3399 | { |
3400 | return is_inode_flag_set(inode, FI_MMAP_FILE);
3401 | } |
3402 | |
3403 | static inline bool f2fs_is_pinned_file(struct inode *inode) |
3404 | { |
3405 | return is_inode_flag_set(inode, FI_PIN_FILE);
3406 | } |
3407 | |
3408 | static inline bool f2fs_is_atomic_file(struct inode *inode) |
3409 | { |
3410 | return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3411 | } |
3412 | |
3413 | static inline bool f2fs_is_cow_file(struct inode *inode) |
3414 | { |
3415 | return is_inode_flag_set(inode, FI_COW_FILE);
3416 | } |
3417 | |
3418 | static inline void *inline_data_addr(struct inode *inode, struct folio *folio) |
3419 | { |
3420 | __le32 *addr = get_dnode_addr(inode, folio);
3421 | |
3422 | return (void *)(addr + DEF_INLINE_RESERVED_SIZE); |
3423 | } |
3424 | |
3425 | static inline int f2fs_has_inline_dentry(struct inode *inode) |
3426 | { |
3427 | return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3428 | } |
3429 | |
3430 | static inline int is_file(struct inode *inode, int type) |
3431 | { |
3432 | return F2FS_I(inode)->i_advise & type; |
3433 | } |
3434 | |
3435 | static inline void set_file(struct inode *inode, int type) |
3436 | { |
3437 | if (is_file(inode, type)) |
3438 | return; |
3439 | F2FS_I(inode)->i_advise |= type; |
3440 | f2fs_mark_inode_dirty_sync(inode, true);
3441 | } |
3442 | |
3443 | static inline void clear_file(struct inode *inode, int type) |
3444 | { |
3445 | if (!is_file(inode, type)) |
3446 | return; |
3447 | F2FS_I(inode)->i_advise &= ~type; |
3448 | f2fs_mark_inode_dirty_sync(inode, true);
3449 | } |
3450 | |
3451 | static inline bool f2fs_is_time_consistent(struct inode *inode) |
3452 | { |
3453 | struct timespec64 ts = inode_get_atime(inode); |
3454 | |
3455 | if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
3456 | return false;
3457 | ts = inode_get_ctime(inode);
3458 | if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
3459 | return false;
3460 | ts = inode_get_mtime(inode);
3461 | if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
3462 | return false; |
3463 | return true; |
3464 | } |
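/*
 * Descriptive note: as the three comparisons above show, i_disk_time[]
 * caches the on-disk atime, ctime and mtime in slots 0, 1 and 2; the
 * inode is "time consistent" only while all three still match the
 * in-core timestamps.
 */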
3465 | |
3466 | static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) |
3467 | { |
3468 | bool ret; |
3469 | |
3470 | if (dsync) { |
3471 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3472 | |
3473 | spin_lock(&sbi->inode_lock[DIRTY_META]);
3474 | ret = list_empty(&F2FS_I(inode)->gdirty_list);
3475 | spin_unlock(&sbi->inode_lock[DIRTY_META]);
3476 | return ret; |
3477 | } |
3478 | if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3479 | file_keep_isize(inode) || |
3480 | i_size_read(inode) & ~PAGE_MASK) |
3481 | return false; |
3482 | |
3483 | if (!f2fs_is_time_consistent(inode)) |
3484 | return false; |
3485 | |
3486 | spin_lock(&F2FS_I(inode)->i_size_lock);
3487 | ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3488 | spin_unlock(&F2FS_I(inode)->i_size_lock);
3489 | |
3490 | return ret; |
3491 | } |
3492 | |
3493 | static inline bool f2fs_readonly(struct super_block *sb) |
3494 | { |
3495 | return sb_rdonly(sb); |
3496 | } |
3497 | |
3498 | static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) |
3499 | { |
3500 | return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); |
3501 | } |
3502 | |
3503 | static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, |
3504 | size_t size, gfp_t flags) |
3505 | { |
3506 | if (time_to_inject(sbi, FAULT_KMALLOC)) |
3507 | return NULL; |
3508 | |
3509 | return kmalloc(size, flags); |
3510 | } |
3511 | |
3512 | static inline void *f2fs_getname(struct f2fs_sb_info *sbi) |
3513 | { |
3514 | if (time_to_inject(sbi, FAULT_KMALLOC)) |
3515 | return NULL; |
3516 | |
3517 | return __getname(); |
3518 | } |
3519 | |
3520 | static inline void f2fs_putname(char *buf) |
3521 | { |
3522 | __putname(buf); |
3523 | } |
3524 | |
3525 | static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, |
3526 | size_t size, gfp_t flags) |
3527 | { |
3528 | return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3529 | } |
3530 | |
3531 | static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, |
3532 | size_t size, gfp_t flags) |
3533 | { |
3534 | if (time_to_inject(sbi, FAULT_KVMALLOC)) |
3535 | return NULL; |
3536 | |
3537 | return kvmalloc(size, flags); |
3538 | } |
3539 | |
3540 | static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, |
3541 | size_t size, gfp_t flags) |
3542 | { |
3543 | return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3544 | } |
3545 | |
3546 | static inline void *f2fs_vmalloc(struct f2fs_sb_info *sbi, size_t size) |
3547 | { |
3548 | if (time_to_inject(sbi, FAULT_VMALLOC)) |
3549 | return NULL; |
3550 | |
3551 | return vmalloc(size); |
3552 | } |
3553 | |
3554 | static inline int get_extra_isize(struct inode *inode) |
3555 | { |
3556 | return F2FS_I(inode)->i_extra_isize / sizeof(__le32); |
3557 | } |
3558 | |
3559 | static inline int get_inline_xattr_addrs(struct inode *inode) |
3560 | { |
3561 | return F2FS_I(inode)->i_inline_xattr_size; |
3562 | } |
3563 | |
3564 | #define f2fs_get_inode_mode(i) \ |
3565 | ((is_inode_flag_set(i, FI_ACL_MODE)) ? \ |
3566 | (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) |
3567 | |
3568 | #define F2FS_MIN_EXTRA_ATTR_SIZE (sizeof(__le32)) |
3569 | |
3570 | #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ |
3571 | (offsetof(struct f2fs_inode, i_extra_end) - \ |
3572 | offsetof(struct f2fs_inode, i_extra_isize)) \ |
3573 | |
3574 | #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) |
3575 | #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ |
3576 | ((offsetof(typeof(*(f2fs_inode)), field) + \ |
3577 | sizeof((f2fs_inode)->field)) \ |
3578 | <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ |
3579 | |
3580 | #define __is_large_section(sbi) (SEGS_PER_SEC(sbi) > 1) |
3581 | |
3582 | #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) |
3583 | |
3584 | bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, |
3585 | block_t blkaddr, int type); |
3586 | static inline void verify_blkaddr(struct f2fs_sb_info *sbi, |
3587 | block_t blkaddr, int type) |
3588 | { |
3589 | if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) |
3590 | f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", |
3591 | blkaddr, type); |
3592 | } |
3593 | |
3594 | static inline bool __is_valid_data_blkaddr(block_t blkaddr) |
3595 | { |
3596 | if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || |
3597 | blkaddr == COMPRESS_ADDR) |
3598 | return false; |
3599 | return true; |
3600 | } |
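/*
 * Example (illustrative): NULL_ADDR, NEW_ADDR and COMPRESS_ADDR are
 * reserved marker values (hole, reserved-but-unwritten, and compressed
 * cluster slot, respectively), so only real on-device block addresses
 * pass this check.
 */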
3601 | |
3602 | /* |
3603 | * file.c |
3604 | */ |
3605 | int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); |
3606 | int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); |
3607 | int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); |
3608 | int f2fs_truncate(struct inode *inode); |
3609 | int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path, |
3610 | struct kstat *stat, u32 request_mask, unsigned int flags); |
3611 | int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, |
3612 | struct iattr *attr); |
3613 | int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); |
3614 | void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); |
3615 | int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, |
3616 | bool readonly, bool need_lock); |
3617 | int f2fs_precache_extents(struct inode *inode); |
3618 | int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); |
3619 | int f2fs_fileattr_set(struct mnt_idmap *idmap, |
3620 | struct dentry *dentry, struct fileattr *fa); |
3621 | long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); |
3622 | long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
3623 | int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); |
3624 | int f2fs_pin_file_control(struct inode *inode, bool inc); |
3625 | |
3626 | /* |
3627 | * inode.c |
3628 | */ |
3629 | void f2fs_set_inode_flags(struct inode *inode); |
3630 | bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio); |
3631 | void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); |
3632 | struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); |
3633 | struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); |
3634 | int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); |
3635 | void f2fs_update_inode(struct inode *inode, struct folio *node_folio); |
3636 | void f2fs_update_inode_page(struct inode *inode); |
3637 | int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); |
3638 | void f2fs_evict_inode(struct inode *inode); |
3639 | void f2fs_handle_failed_inode(struct inode *inode); |
3640 | |
3641 | /* |
3642 | * namei.c |
3643 | */ |
3644 | int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, |
3645 | bool hot, bool set); |
3646 | struct dentry *f2fs_get_parent(struct dentry *child); |
3647 | int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir, |
3648 | struct inode **new_inode); |
3649 | |
3650 | /* |
3651 | * dir.c |
3652 | */ |
3653 | #if IS_ENABLED(CONFIG_UNICODE) |
3654 | int f2fs_init_casefolded_name(const struct inode *dir, |
3655 | struct f2fs_filename *fname); |
3656 | void f2fs_free_casefolded_name(struct f2fs_filename *fname); |
3657 | #else |
3658 | static inline int f2fs_init_casefolded_name(const struct inode *dir, |
3659 | struct f2fs_filename *fname) |
3660 | { |
3661 | return 0; |
3662 | } |
3663 | |
3664 | static inline void f2fs_free_casefolded_name(struct f2fs_filename *fname) |
3665 | { |
3666 | } |
3667 | #endif /* CONFIG_UNICODE */ |
3668 | |
3669 | int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, |
3670 | int lookup, struct f2fs_filename *fname); |
3671 | int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, |
3672 | struct f2fs_filename *fname); |
3673 | void f2fs_free_filename(struct f2fs_filename *fname); |
3674 | struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, |
3675 | const struct f2fs_filename *fname, int *max_slots, |
3676 | bool use_hash); |
3677 | int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, |
3678 | unsigned int start_pos, struct fscrypt_str *fstr); |
3679 | void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, |
3680 | struct f2fs_dentry_ptr *d); |
3681 | struct folio *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, |
3682 | const struct f2fs_filename *fname, struct folio *dfolio); |
3683 | void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, |
3684 | unsigned int current_depth); |
3685 | int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); |
3686 | void f2fs_drop_nlink(struct inode *dir, struct inode *inode); |
3687 | struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, |
3688 | const struct f2fs_filename *fname, struct folio **res_folio); |
3689 | struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, |
3690 | const struct qstr *child, struct folio **res_folio); |
3691 | struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct folio **f); |
3692 | ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, |
3693 | struct folio **folio); |
3694 | void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, |
3695 | struct folio *folio, struct inode *inode); |
3696 | bool f2fs_has_enough_room(struct inode *dir, struct folio *ifolio, |
3697 | const struct f2fs_filename *fname); |
3698 | void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, |
3699 | const struct fscrypt_str *name, f2fs_hash_t name_hash, |
3700 | unsigned int bit_pos); |
3701 | int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, |
3702 | struct inode *inode, nid_t ino, umode_t mode); |
3703 | int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, |
3704 | struct inode *inode, nid_t ino, umode_t mode); |
3705 | int f2fs_do_add_link(struct inode *dir, const struct qstr *name, |
3706 | struct inode *inode, nid_t ino, umode_t mode); |
3707 | void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct folio *folio, |
3708 | struct inode *dir, struct inode *inode); |
3709 | int f2fs_do_tmpfile(struct inode *inode, struct inode *dir, |
3710 | struct f2fs_filename *fname); |
3711 | bool f2fs_empty_dir(struct inode *dir); |
3712 | |
3713 | static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) |
3714 | { |
3715 | if (fscrypt_is_nokey_name(dentry)) |
3716 | return -ENOKEY; |
3717 | return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3718 | inode, inode->i_ino, inode->i_mode);
3719 | } |
3720 | |
3721 | /* |
3722 | * super.c |
3723 | */ |
3724 | int f2fs_inode_dirtied(struct inode *inode, bool sync); |
3725 | void f2fs_inode_synced(struct inode *inode); |
3726 | int f2fs_dquot_initialize(struct inode *inode); |
3727 | int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); |
3728 | int f2fs_do_quota_sync(struct super_block *sb, int type); |
3729 | loff_t max_file_blocks(struct inode *inode); |
3730 | void f2fs_quota_off_umount(struct super_block *sb); |
3731 | void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag); |
3732 | void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason); |
3733 | void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error); |
3734 | void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error); |
3735 | int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); |
3736 | int f2fs_sync_fs(struct super_block *sb, int sync); |
3737 | int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); |
3738 | |
3739 | /* |
3740 | * hash.c |
3741 | */ |
3742 | void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); |
3743 | |
3744 | /* |
3745 | * node.c |
3746 | */ |
3747 | struct node_info; |
3748 | |
3749 | int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); |
3750 | bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); |
3751 | bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio); |
3752 | void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); |
3753 | void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio); |
3754 | void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); |
3755 | int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); |
3756 | bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); |
3757 | bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); |
3758 | int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, |
3759 | struct node_info *ni, bool checkpoint_context); |
3760 | pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); |
3761 | int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); |
3762 | int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); |
3763 | int f2fs_truncate_xattr_node(struct inode *inode); |
3764 | int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, |
3765 | unsigned int seq_id); |
3766 | int f2fs_remove_inode_page(struct inode *inode); |
3767 | struct folio *f2fs_new_inode_folio(struct inode *inode); |
3768 | struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs); |
3769 | void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); |
3770 | struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid); |
3771 | struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino); |
3772 | struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid); |
3773 | int f2fs_move_node_folio(struct folio *node_folio, int gc_type); |
3774 | void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); |
3775 | int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, |
3776 | struct writeback_control *wbc, bool atomic, |
3777 | unsigned int *seq_id); |
3778 | int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, |
3779 | struct writeback_control *wbc, |
3780 | bool do_balance, enum iostat_type io_type); |
3781 | int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); |
3782 | bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); |
3783 | void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); |
3784 | void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); |
3785 | int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); |
3786 | int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio); |
3787 | int f2fs_recover_xattr_data(struct inode *inode, struct page *page); |
3788 | int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); |
3789 | int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, |
3790 | unsigned int segno, struct f2fs_summary_block *sum); |
3791 | int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
3792 | int f2fs_build_node_manager(struct f2fs_sb_info *sbi); |
3793 | void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); |
3794 | int __init f2fs_create_node_manager_caches(void); |
3795 | void f2fs_destroy_node_manager_caches(void); |
3796 | |
3797 | /* |
3798 | * segment.c |
3799 | */ |
3800 | bool f2fs_need_SSR(struct f2fs_sb_info *sbi); |
3801 | int f2fs_commit_atomic_write(struct inode *inode); |
3802 | void f2fs_abort_atomic_write(struct inode *inode, bool clean); |
3803 | void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); |
3804 | void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); |
3805 | int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); |
3806 | int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); |
3807 | int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); |
3808 | void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); |
3809 | void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr, |
3810 | unsigned int len); |
3811 | bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); |
3812 | int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); |
3813 | void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); |
3814 | void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); |
3815 | bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); |
3816 | void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, |
3817 | struct cp_control *cpc); |
3818 | void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); |
3819 | block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); |
3820 | int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); |
3821 | void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); |
3822 | int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); |
3823 | bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); |
3824 | int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); |
3825 | int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi); |
3826 | void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); |
3827 | void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); |
3828 | int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, |
3829 | unsigned int start, unsigned int end); |
3830 | int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); |
3831 | int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi); |
3832 | int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); |
3833 | int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); |
3834 | bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, |
3835 | struct cp_control *cpc); |
3836 | struct folio *f2fs_get_sum_folio(struct f2fs_sb_info *sbi, unsigned int segno); |
3837 | void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, |
3838 | block_t blk_addr); |
3839 | void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio, |
3840 | enum iostat_type io_type); |
3841 | void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); |
3842 | void f2fs_outplace_write_data(struct dnode_of_data *dn, |
3843 | struct f2fs_io_info *fio); |
3844 | int f2fs_inplace_write_data(struct f2fs_io_info *fio); |
3845 | void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
3846 | block_t old_blkaddr, block_t new_blkaddr, |
3847 | bool recover_curseg, bool recover_newaddr, |
3848 | bool from_gc); |
3849 | void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, |
3850 | block_t old_addr, block_t new_addr, |
3851 | unsigned char version, bool recover_curseg, |
3852 | bool recover_newaddr); |
3853 | enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi, |
3854 | enum log_type seg_type); |
3855 | int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, |
3856 | block_t old_blkaddr, block_t *new_blkaddr, |
3857 | struct f2fs_summary *sum, int type, |
3858 | struct f2fs_io_info *fio); |
3859 | void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, |
3860 | block_t blkaddr, unsigned int blkcnt); |
3861 | void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type, |
3862 | bool ordered, bool locked); |
3863 | #define f2fs_wait_on_page_writeback(page, type, ordered, locked) \ |
3864 | f2fs_folio_wait_writeback(page_folio(page), type, ordered, locked) |
3865 | void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); |
3866 | void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, |
3867 | block_t len); |
3868 | void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); |
3869 | void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); |
3870 | int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, |
3871 | unsigned int val, int alloc); |
3872 | void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
3873 | int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi); |
3874 | int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); |
3875 | void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); |
3876 | int __init f2fs_create_segment_manager_caches(void); |
3877 | void f2fs_destroy_segment_manager_caches(void); |
3878 | int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint); |
3879 | enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, |
3880 | enum page_type type, enum temp_type temp); |
3881 | unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi); |
3882 | unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, |
3883 | unsigned int segno); |
3884 | unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi, |
3885 | unsigned int segno); |
3886 | |
3887 | static inline struct inode *fio_inode(struct f2fs_io_info *fio) |
3888 | { |
3889 | return page_folio(fio->page)->mapping->host; |
3890 | } |
3891 | |
3892 | #define DEF_FRAGMENT_SIZE 4 |
3893 | #define MIN_FRAGMENT_SIZE 1 |
3894 | #define MAX_FRAGMENT_SIZE 512 |
3895 | |
3896 | static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) |
3897 | { |
3898 | return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG || |
3899 | F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK; |
3900 | } |
3901 | |
3902 | /* |
3903 | * checkpoint.c |
3904 | */ |
3905 | void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, |
3906 | unsigned char reason); |
3907 | void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); |
3908 | struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index); |
3909 | struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index); |
3910 | struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index); |
3911 | struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index); |
3912 | bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, |
3913 | block_t blkaddr, int type); |
3914 | bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, |
3915 | block_t blkaddr, int type); |
3916 | int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, |
3917 | int type, bool sync); |
3918 | void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, |
3919 | unsigned int ra_blocks); |
3920 | long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, |
3921 | long nr_to_write, enum iostat_type io_type); |
3922 | void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); |
3923 | void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); |
3924 | void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); |
3925 | bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); |
3926 | void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, |
3927 | unsigned int devidx, int type); |
3928 | bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, |
3929 | unsigned int devidx, int type); |
3930 | int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); |
3931 | void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); |
3932 | void f2fs_add_orphan_inode(struct inode *inode); |
3933 | void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); |
3934 | int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); |
3935 | int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); |
3936 | void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio); |
3937 | void f2fs_remove_dirty_inode(struct inode *inode); |
3938 | int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, |
3939 | bool from_cp); |
3940 | void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); |
3941 | u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); |
3942 | int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
3943 | void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); |
3944 | int __init f2fs_create_checkpoint_caches(void); |
3945 | void f2fs_destroy_checkpoint_caches(void); |
3946 | int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); |
3947 | int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); |
3948 | void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); |
3949 | void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); |
3950 | |
3951 | /* |
3952 | * data.c |
3953 | */ |
3954 | int __init f2fs_init_bioset(void); |
3955 | void f2fs_destroy_bioset(void); |
3956 | bool f2fs_is_cp_guaranteed(struct page *page); |
3957 | int f2fs_init_bio_entry_cache(void); |
3958 | void f2fs_destroy_bio_entry_cache(void); |
3959 | void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio, |
3960 | enum page_type type); |
3961 | int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi); |
3962 | void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); |
3963 | void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, |
3964 | struct inode *inode, struct page *page, |
3965 | nid_t ino, enum page_type type); |
3966 | void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, |
3967 | struct bio **bio, struct folio *folio); |
3968 | void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); |
3969 | int f2fs_submit_page_bio(struct f2fs_io_info *fio); |
3970 | int f2fs_merge_page_bio(struct f2fs_io_info *fio); |
3971 | void f2fs_submit_page_write(struct f2fs_io_info *fio); |
3972 | struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, |
3973 | block_t blk_addr, sector_t *sector); |
3974 | int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); |
3975 | void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); |
3976 | void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); |
3977 | int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); |
3978 | int f2fs_reserve_new_block(struct dnode_of_data *dn); |
3979 | int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index); |
3980 | int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); |
3981 | struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index, |
3982 | blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs); |
3983 | struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index, |
3984 | pgoff_t *next_pgofs); |
3985 | struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index, |
3986 | bool for_write); |
3987 | struct folio *f2fs_get_new_data_folio(struct inode *inode, |
3988 | struct folio *ifolio, pgoff_t index, bool new_i_size); |
3989 | int f2fs_do_write_data_page(struct f2fs_io_info *fio); |
3990 | int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag); |
3991 | int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
3992 | u64 start, u64 len); |
3993 | int f2fs_encrypt_one_page(struct f2fs_io_info *fio); |
3994 | bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio); |
3995 | bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio); |
3996 | int f2fs_write_single_data_page(struct folio *folio, int *submitted, |
3997 | struct bio **bio, sector_t *last_block, |
3998 | struct writeback_control *wbc, |
3999 | enum iostat_type io_type, |
4000 | int compr_blocks, bool allow_balance); |
4001 | void f2fs_write_failed(struct inode *inode, loff_t to); |
4002 | void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length); |
4003 | bool f2fs_release_folio(struct folio *folio, gfp_t wait); |
4004 | bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); |
4005 | void f2fs_clear_page_cache_dirty_tag(struct folio *folio); |
4006 | int f2fs_init_post_read_processing(void); |
4007 | void f2fs_destroy_post_read_processing(void); |
4008 | int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); |
4009 | void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); |
4010 | extern const struct iomap_ops f2fs_iomap_ops; |
4011 | |
4012 | /* |
4013 | * gc.c |
4014 | */ |
4015 | int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); |
4016 | void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); |
4017 | block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); |
4018 | int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control); |
4019 | void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); |
4020 | int f2fs_gc_range(struct f2fs_sb_info *sbi, |
4021 | unsigned int start_seg, unsigned int end_seg, |
4022 | bool dry_run, unsigned int dry_run_sections); |
4023 | int f2fs_resize_fs(struct file *filp, __u64 block_count); |
4024 | int __init f2fs_create_garbage_collection_cache(void); |
4025 | void f2fs_destroy_garbage_collection_cache(void); |
4026 | /* victim selection function for cleaning and SSR */ |
4027 | int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, |
4028 | int gc_type, int type, char alloc_mode, |
4029 | unsigned long long age, bool one_time); |
4030 | |
4031 | /* |
4032 | * recovery.c |
4033 | */ |
4034 | int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); |
4035 | bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); |
4036 | int __init f2fs_create_recovery_cache(void); |
4037 | void f2fs_destroy_recovery_cache(void); |
4038 | |
4039 | /* |
4040 | * debug.c |
4041 | */ |
4042 | #ifdef CONFIG_F2FS_STAT_FS |
4043 | enum { |
4044 | DEVSTAT_INUSE, |
4045 | DEVSTAT_DIRTY, |
4046 | DEVSTAT_FULL, |
4047 | DEVSTAT_FREE, |
4048 | DEVSTAT_PREFREE, |
4049 | DEVSTAT_MAX, |
4050 | }; |
4051 | |
4052 | struct f2fs_dev_stats { |
4053 | unsigned int devstats[2][DEVSTAT_MAX]; /* 0: segs, 1: secs */ |
4054 | }; |
4055 | |
4056 | struct f2fs_stat_info { |
4057 | struct list_head stat_list; |
4058 | struct f2fs_sb_info *sbi; |
4059 | int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; |
4060 | int main_area_segs, main_area_sections, main_area_zones; |
4061 | unsigned long long hit_cached[NR_EXTENT_CACHES]; |
4062 | unsigned long long hit_rbtree[NR_EXTENT_CACHES]; |
4063 | unsigned long long total_ext[NR_EXTENT_CACHES]; |
4064 | unsigned long long hit_total[NR_EXTENT_CACHES]; |
4065 | int ext_tree[NR_EXTENT_CACHES]; |
4066 | int zombie_tree[NR_EXTENT_CACHES]; |
4067 | int ext_node[NR_EXTENT_CACHES]; |
4068 | /* to count memory footprint */ |
4069 | unsigned long long ext_mem[NR_EXTENT_CACHES]; |
4070 | /* for read extent cache */ |
4071 | unsigned long long hit_largest; |
4072 | /* for block age extent cache */ |
4073 | unsigned long long allocated_data_blocks; |
4074 | int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; |
4075 | int ndirty_data, ndirty_qdata; |
4076 | unsigned int ndirty_dirs, ndirty_files, ndirty_all; |
4077 | unsigned int nquota_files, ndonate_files; |
4078 | int nats, dirty_nats, sits, dirty_sits; |
4079 | int free_nids, avail_nids, alloc_nids; |
4080 | int total_count, utilization; |
4081 | int nr_wb_cp_data, nr_wb_data; |
4082 | int nr_rd_data, nr_rd_node, nr_rd_meta; |
4083 | int nr_dio_read, nr_dio_write; |
4084 | unsigned int io_skip_bggc, other_skip_bggc; |
4085 | int nr_flushing, nr_flushed, flush_list_empty; |
4086 | int nr_discarding, nr_discarded; |
4087 | int nr_discard_cmd; |
4088 | unsigned int undiscard_blks; |
4089 | int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; |
4090 | unsigned int cur_ckpt_time, peak_ckpt_time; |
4091 | int inline_xattr, inline_inode, inline_dir, append, update, orphans; |
4092 | int compr_inode, swapfile_inode; |
4093 | unsigned long long compr_blocks; |
4094 | int aw_cnt, max_aw_cnt; |
4095 | unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; |
4096 | unsigned int bimodal, avg_vblocks; |
4097 | int util_free, util_valid, util_invalid; |
4098 | int rsvd_segs, overp_segs; |
4099 | int dirty_count, node_pages, meta_pages, compress_pages; |
4100 | int compress_page_hit; |
4101 | int prefree_count, free_segs, free_secs; |
4102 | int cp_call_count[MAX_CALL_TYPE], cp_count; |
4103 | int gc_call_count[MAX_CALL_TYPE]; |
4104 | int gc_segs[2][2]; |
4105 | int gc_secs[2][2]; |
4106 | int tot_blks, data_blks, node_blks; |
4107 | int bg_data_blks, bg_node_blks; |
4108 | int curseg[NR_CURSEG_TYPE]; |
4109 | int cursec[NR_CURSEG_TYPE]; |
4110 | int curzone[NR_CURSEG_TYPE]; |
4111 | unsigned int dirty_seg[NR_CURSEG_TYPE]; |
4112 | unsigned int full_seg[NR_CURSEG_TYPE]; |
4113 | unsigned int valid_blks[NR_CURSEG_TYPE]; |
4114 | |
4115 | unsigned int meta_count[META_MAX]; |
4116 | unsigned int segment_count[2]; |
4117 | unsigned int block_count[2]; |
4118 | unsigned int inplace_count; |
4119 | unsigned long long base_mem, cache_mem, page_mem; |
4120 | struct f2fs_dev_stats *dev_stats; |
4121 | }; |
4122 | |
4123 | static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) |
4124 | { |
4125 | return (struct f2fs_stat_info *)sbi->stat_info; |
4126 | } |
4127 | |
4128 | #define stat_inc_cp_call_count(sbi, foreground) \ |
4129 | atomic_inc(&sbi->cp_call_count[(foreground)]) |
#define stat_inc_cp_count(sbi)		(F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi, type)	(atomic64_inc(&(sbi)->total_hit_ext[type]))
#define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
#define stat_inc_inline_xattr(inode) \
	do { \
		if (f2fs_has_inline_xattr(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
	} while (0)
#define stat_dec_inline_xattr(inode) \
	do { \
		if (f2fs_has_inline_xattr(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
	} while (0)
#define stat_inc_inline_inode(inode) \
	do { \
		if (f2fs_has_inline_data(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
	} while (0)
#define stat_dec_inline_inode(inode) \
	do { \
		if (f2fs_has_inline_data(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
	} while (0)
#define stat_inc_inline_dir(inode) \
	do { \
		if (f2fs_has_inline_dentry(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
	} while (0)
#define stat_dec_inline_dir(inode) \
	do { \
		if (f2fs_has_inline_dentry(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
	} while (0)
#define stat_inc_compr_inode(inode) \
	do { \
		if (f2fs_compressed_file(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
	} while (0)
#define stat_dec_compr_inode(inode) \
	do { \
		if (f2fs_compressed_file(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
	} while (0)
#define stat_add_compr_blocks(inode, blocks) \
	(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks) \
	(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_swapfile_inode(inode) \
	(atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
#define stat_dec_swapfile_inode(inode) \
	(atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
#define stat_inc_atomic_inode(inode) \
	(atomic_inc(&F2FS_I_SB(inode)->atomic_files))
#define stat_dec_atomic_inode(inode) \
	(atomic_dec(&F2FS_I_SB(inode)->atomic_files))
#define stat_inc_meta_count(sbi, blkaddr) \
	do { \
		if (blkaddr < SIT_I(sbi)->sit_base_addr) \
			atomic_inc(&(sbi)->meta_count[META_CP]); \
		else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_SIT]); \
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_NAT]); \
		else if (blkaddr < SM_I(sbi)->main_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_SSA]); \
	} while (0)
#define stat_inc_seg_type(sbi, curseg) \
	((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg) \
	((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi) \
	(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode) \
	do { \
		int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
		if (cur > max) \
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
	} while (0)
#define stat_inc_gc_call_count(sbi, foreground) \
	(F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
#define stat_inc_gc_sec_count(sbi, type, gc_type) \
	(F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
#define stat_inc_gc_seg_count(sbi, type, gc_type) \
	(F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)

#define stat_inc_tot_blk_count(si, blks) \
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		stat_inc_tot_blk_count(si, blks); \
		si->data_blks += (blks); \
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		stat_inc_tot_blk_count(si, blks); \
		si->node_blks += (blks); \
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
	} while (0)
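
/*
 * Usage sketch (illustrative, not part of the original header): a GC pass
 * typically bumps the call counter once and then feeds per-block results
 * into the macros above. The function below is hypothetical; FOREGROUND,
 * FG_GC and the stat_* macros are the real identifiers.
 *
 *	static void gc_account_example(struct f2fs_sb_info *sbi,
 *					unsigned int migrated_data_blks)
 *	{
 *		stat_inc_gc_call_count(sbi, FOREGROUND);
 *		stat_inc_data_blk_count(sbi, migrated_data_blks, FG_GC);
 *	}
 */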

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_call_count(sbi, foreground)		do { } while (0)
#define stat_inc_cp_count(sbi)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi, type)			do { } while (0)
#define stat_inc_rbtree_node_hit(sbi, type)		do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi, type)		do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_inc_swapfile_inode(inode)			do { } while (0)
#define stat_dec_swapfile_inode(inode)			do { } while (0)
#define stat_inc_atomic_inode(inode)			do { } while (0)
#define stat_dec_atomic_inode(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_gc_call_count(sbi, foreground)		do { } while (0)
#define stat_inc_gc_sec_count(sbi, type, gc_type)	do { } while (0)
#define stat_inc_gc_seg_count(sbi, type, gc_type)	do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct folio *folio, struct folio *ifolio);
void f2fs_truncate_inline_inode(struct inode *inode, struct folio *ifolio,
		u64 from);
int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
int f2fs_convert_inline_folio(struct dnode_of_data *dn, struct folio *folio);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
int f2fs_recover_inline_data(struct inode *inode, struct folio *nfolio);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
		const struct f2fs_filename *fname, struct folio **res_folio,
		bool use_hash);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
		struct folio *ifolio);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
		struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
		struct folio *folio, struct inode *dir, struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
		struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
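
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * about to grow a file past what fits in the inode must first move inline
 * data out to a regular data block. Only the two helpers named here are
 * real; the surrounding error handling is schematic.
 *
 *	if (f2fs_has_inline_data(inode)) {
 *		err = f2fs_convert_inline_inode(inode);
 *		if (err)
 *			return err;
 *	}
 */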

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
		struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
		struct shrink_control *sc);
unsigned int f2fs_donate_files(void);
void f2fs_reclaim_caches(unsigned int reclaim_caches_kb);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
bool sanity_check_extent_cache(struct inode *inode, struct page *ipage);
void f2fs_init_extent_tree(struct inode *inode);
void f2fs_drop_extent_tree(struct inode *inode);
void f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/* read extent cache ops */
void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio);
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
		struct extent_info *ei);
bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
		block_t *blkaddr);
void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
		pgoff_t fofs, block_t blkaddr, unsigned int len);
unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
		int nr_shrink);
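
/*
 * Usage sketch (illustrative, not part of the original header): the read
 * path consults the extent cache before walking node blocks. On a hit the
 * block address is interpolated from the cached extent, mirroring how
 * f2fs_map_blocks() uses it; 'pgofs' and the fallback are schematic.
 *
 *	struct extent_info ei;
 *
 *	if (f2fs_lookup_read_extent_cache(inode, pgofs, &ei))
 *		blkaddr = ei.blk + pgofs - ei.fofs;
 *	else
 *		... fall back to a dnode lookup ...
 */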

/* block age extent cache ops */
void f2fs_init_age_extent_tree(struct inode *inode);
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
		struct extent_info *ei);
void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
		pgoff_t fofs, unsigned int len);
unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
		int nr_shrink);

/*
 * sysfs.c
 */
#define MIN_RA_MUL	2
#define MAX_RA_MUL	256

int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if reads of the inode's data need to undergo a
 * postprocessing step, such as decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

static inline bool f2fs_used_in_atomic_write(struct inode *inode)
{
	return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
}

static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
{
	return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
enum cluster_check_type {
	CLUSTER_IS_COMPR,	/* check only whether the cluster is compressed */
	CLUSTER_COMPR_BLKS,	/* return # of compressed blocks in a cluster */
	CLUSTER_RAW_BLKS	/* return # of raw blocks in a cluster */
};
bool f2fs_is_compressed_page(struct page *page);
struct folio *f2fs_compress_control_folio(struct folio *folio);
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
		pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
bool f2fs_is_compress_level_valid(int alg, int lvl);
int __init f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
		block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
		int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
int f2fs_write_multi_pages(struct compress_ctx *cc,
		int *submitted,
		struct writeback_control *wbc,
		enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
		pgoff_t fofs, block_t blkaddr,
		unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
		unsigned nr_pages, sector_t *last_block_in_bio,
		struct readahead_control *rac, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
		bool in_task);
void f2fs_put_folio_dic(struct folio *folio, bool in_task);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
		unsigned int ofs_in_node);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
		block_t blkaddr, unsigned int len);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
		nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
		block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
	do { \
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
		sbi->compr_new_inode++; \
	} while (0)
#define add_compr_block_stat(inode, blocks) \
	do { \
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
		int diff = F2FS_I(inode)->i_cluster_size - blocks; \
		sbi->compr_written_block += blocks; \
		sbi->compr_saved_block += diff; \
	} while (0)
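
/*
 * Worked example for add_compr_block_stat() above (numbers assumed): with
 * i_cluster_size == 16, a cluster written back as 10 compressed blocks
 * yields diff = 16 - 10 = 6, so compr_written_block grows by 10 and
 * compr_saved_block by 6.
 */
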
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression is not supported */
	return false;
}
static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
static inline struct folio *f2fs_compress_control_folio(struct folio *folio)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int __init f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
		bool in_task) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
		bool failed, block_t blkaddr, bool in_task)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_folio_dic(struct folio *folio, bool in_task)
{
	WARN_ON_ONCE(1);
}
static inline unsigned int f2fs_cluster_blocks_are_contiguous(
		struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
		block_t blkaddr, unsigned int len) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
		struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi,
		struct folio *folio, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
		nid_t ino) { }
#define inc_compr_inode_stat(inode)	do { } while (0)
static inline int f2fs_is_compressed_cluster(struct inode *inode,
		pgoff_t index) { return 0; }
static inline bool f2fs_is_sparse_cluster(struct inode *inode,
		pgoff_t index) { return true; }
static inline void f2fs_update_read_extent_tree_range_compressed(
		struct inode *inode,
		pgoff_t fofs, block_t blkaddr,
		unsigned int llen, unsigned int c_len) { }
#endif

static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
	fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
	fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
					BIT(COMPRESS_CHKSUM) : 0;
	fi->i_cluster_size = BIT(fi->i_log_cluster_size);
	if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
		fi->i_compress_algorithm == COMPRESS_ZSTD) &&
			F2FS_OPTION(sbi).compress_level)
		fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
	fi->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	inc_compr_inode_stat(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
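
/*
 * Usage sketch (illustrative, not part of the original header): an ioctl
 * or mount-policy path opts an inode into compression only after checking
 * eligibility with f2fs_may_compress(), defined later in this header.
 *
 *	if (f2fs_may_compress(inode)) {
 *		err = set_compress_context(inode);
 *		if (err)
 *			return err;
 *	}
 */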

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	f2fs_down_write(&fi->i_sem);

	if (!f2fs_compressed_file(inode)) {
		f2fs_up_write(&fi->i_sem);
		return true;
	}
	if (f2fs_is_mmap_file(inode) ||
		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
		f2fs_up_write(&fi->i_sem);
		return false;
	}

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);

	f2fs_up_write(&fi->i_sem);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
F2FS_FEATURE_FUNCS(device_alias, DEVICE_ALIAS);
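
/*
 * Illustrative expansion (not in the original header): each instantiation
 * above generates one feature predicate. For example,
 * F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to:
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */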

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi,
		unsigned int zone)
{
	return test_bit(zone, FDEV(devi).blkz_seq);
}

static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
		block_t blkaddr)
{
	return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz);
}
#endif

static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
		struct block_device *bdev)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).bdev == bdev)
			return i;

	WARN_ON(1);
	return -1;
}

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}
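
/*
 * Usage sketch (illustrative, not part of the original header): discard
 * issuing is worthwhile either when the user asked for it and the hardware
 * can honor it, or when zoned devices make it mandatory. A mount path
 * might gate its discard machinery like this (thread start is schematic):
 *
 *	if (f2fs_realtime_discard_enable(sbi))
 *		... start the discard-issuing thread ...
 */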

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi,
		block_t blkaddr)
{
	if (f2fs_sb_has_blkzoned(sbi)) {
#ifdef CONFIG_BLK_DEV_ZONED
		int devi = f2fs_target_device_index(sbi, blkaddr);

		if (!bdev_is_zoned(FDEV(devi).bdev))
			return false;

		if (f2fs_is_multi_device(sbi)) {
			if (blkaddr < FDEV(devi).start_blk ||
				blkaddr > FDEV(devi).end_blk) {
				f2fs_err(sbi, "Invalid block %x", blkaddr);
				return false;
			}
			blkaddr -= FDEV(devi).start_blk;
		}

		return f2fs_blkz_is_seq(sbi, devi, blkaddr);
#else
		return false;
#endif
	}
	return false;
}

static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
		f2fs_is_mmap_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
		u64 blocks, bool add)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int diff = fi->i_cluster_size - blocks;

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}
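
/*
 * Worked example for f2fs_i_compr_blocks_update() (numbers assumed): with
 * i_cluster_size == 16 and a cluster stored as 4 compressed blocks,
 * diff = 16 - 4 = 12, so twelve saved blocks are added to (add == true)
 * or subtracted from (add == false) i_compr_blocks before the inode is
 * marked dirty.
 */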

static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
		int flag)
{
	if (!f2fs_is_multi_device(sbi))
		return false;
	if (flag != F2FS_GET_BLOCK_DIO)
		return false;
	return sbi->aligned_blksize;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
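
/*
 * Usage sketch (illustrative, not part of the original header): read
 * completion only schedules fs-verity verification for pages that lie
 * below i_size, which is exactly the bound f2fs_need_verity() checks.
 *
 *	if (f2fs_need_verity(inode, folio->index))
 *		... queue the folio for fs-verity verification ...
 */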

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
		unsigned long type, enum fault_option fo);
#else
static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
		unsigned long rate, unsigned long type,
		enum fault_option fo)
{
	return 0;
}
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}

static inline void f2fs_io_schedule_timeout(long timeout)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	io_schedule_timeout(timeout);
}

static inline void f2fs_io_schedule_timeout_killable(long timeout)
{
	while (timeout) {
		if (fatal_signal_pending(current))
			return;
		set_current_state(TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(DEFAULT_IO_TIMEOUT);
		if (timeout <= DEFAULT_IO_TIMEOUT)
			return;
		timeout -= DEFAULT_IO_TIMEOUT;
	}
}
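
/*
 * Usage sketch (illustrative, not part of the original header): a retry
 * loop that backs off in DEFAULT_IO_TIMEOUT steps yet still reacts to
 * SIGKILL; the loop itself is hypothetical, f2fs_trylock_op() is real.
 *
 *	while (!f2fs_trylock_op(sbi)) {
 *		if (fatal_signal_pending(current))
 *			return -EINTR;
 *		f2fs_io_schedule_timeout_killable(DEFAULT_IO_TIMEOUT);
 *	}
 */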

static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
		struct folio *folio, enum page_type type)
{
	pgoff_t ofs = folio->index;

	if (unlikely(f2fs_cp_error(sbi)))
		return;

	if (ofs == sbi->page_eio_ofs[type]) {
		if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
			set_ckpt_flags(sbi, CP_ERROR_FLAG);
	} else {
		sbi->page_eio_ofs[type] = ofs;
		sbi->page_eio_cnt[type] = 0;
	}
}

static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
}

static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
		block_t blkaddr, unsigned int cnt)
{
	bool need_submit = false;
	int i = 0;

	do {
		struct folio *folio;

		folio = filemap_get_folio(META_MAPPING(sbi), blkaddr + i);
		if (!IS_ERR(folio)) {
			if (folio_test_writeback(folio))
				need_submit = true;
			f2fs_folio_put(folio, false);
		}
	} while (++i < cnt && !need_submit);

	if (need_submit)
		f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
				NULL, 0, DATA);

	truncate_inode_pages_range(META_MAPPING(sbi),
			F2FS_BLK_TO_BYTES((loff_t)blkaddr),
			F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
}

static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
		block_t blkaddr, unsigned int len)
{
	f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
	f2fs_invalidate_compress_pages_range(sbi, blkaddr, len);
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */