1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * fs/f2fs/f2fs.h
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #ifndef _LINUX_F2FS_H
9 #define _LINUX_F2FS_H
10
11 #include <linux/uio.h>
12 #include <linux/types.h>
13 #include <linux/page-flags.h>
14 #include <linux/buffer_head.h>
15 #include <linux/slab.h>
16 #include <linux/crc32.h>
17 #include <linux/magic.h>
18 #include <linux/kobject.h>
19 #include <linux/sched.h>
20 #include <linux/cred.h>
21 #include <linux/vmalloc.h>
22 #include <linux/bio.h>
23 #include <linux/blkdev.h>
24 #include <linux/quotaops.h>
25 #include <linux/part_stat.h>
26 #include <crypto/hash.h>
27
28 #include <linux/fscrypt.h>
29 #include <linux/fsverity.h>
30
31 #ifdef CONFIG_F2FS_CHECK_FS
32 #define f2fs_bug_on(sbi, condition) BUG_ON(condition)
33 #else
34 #define f2fs_bug_on(sbi, condition) \
35 do { \
36 if (WARN_ON(condition)) \
37 set_sbi_flag(sbi, SBI_NEED_FSCK); \
38 } while (0)
39 #endif
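/*
 * Example (illustrative only, not part of the original header): callers
 * assert internal invariants with this macro; with CONFIG_F2FS_CHECK_FS the
 * condition is fatal, otherwise it only warns and flags the filesystem for
 * fsck, e.g.:
 *
 *	f2fs_bug_on(sbi, !PageLocked(page));
 */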
40
41 enum {
42 FAULT_KMALLOC,
43 FAULT_KVMALLOC,
44 FAULT_PAGE_ALLOC,
45 FAULT_PAGE_GET,
46 FAULT_ALLOC_NID,
47 FAULT_ORPHAN,
48 FAULT_BLOCK,
49 FAULT_DIR_DEPTH,
50 FAULT_EVICT_INODE,
51 FAULT_TRUNCATE,
52 FAULT_READ_IO,
53 FAULT_CHECKPOINT,
54 FAULT_DISCARD,
55 FAULT_WRITE_IO,
56 FAULT_MAX,
57 };
58
59 #ifdef CONFIG_F2FS_FAULT_INJECTION
60 #define F2FS_ALL_FAULT_TYPE ((1 << FAULT_MAX) - 1)
61
62 struct f2fs_fault_info {
63 atomic_t inject_ops;
64 unsigned int inject_rate;
65 unsigned int inject_type;
66 };
67
68 extern const char *f2fs_fault_name[FAULT_MAX];
69 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
70 #endif
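/*
 * Illustrative sketch (assuming the usual time_to_inject() /
 * f2fs_show_injection_info() helpers that the full header provides): a fault
 * site checks the per-sb fault_info before doing the real work, roughly:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 *	return kmalloc(size, flags);
 */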
71
72 #define MIN_ROOT_RESERVED_BLOCKS (128 * 1024 * 1024)
73
74 /*
75 * For mount options
76 */
77 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
78 #define F2FS_MOUNT_DISCARD 0x00000004
79 #define F2FS_MOUNT_NOHEAP 0x00000008
80 #define F2FS_MOUNT_XATTR_USER 0x00000010
81 #define F2FS_MOUNT_POSIX_ACL 0x00000020
82 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
83 #define F2FS_MOUNT_INLINE_XATTR 0x00000080
84 #define F2FS_MOUNT_INLINE_DATA 0x00000100
85 #define F2FS_MOUNT_INLINE_DENTRY 0x00000200
86 #define F2FS_MOUNT_FLUSH_MERGE 0x00000400
87 #define F2FS_MOUNT_NOBARRIER 0x00000800
88 #define F2FS_MOUNT_FASTBOOT 0x00001000
89 #define F2FS_MOUNT_READ_EXTENT_CACHE 0x00002000
90 #define F2FS_MOUNT_DATA_FLUSH 0x00008000
91 #define F2FS_MOUNT_FAULT_INJECTION 0x00010000
92 #define F2FS_MOUNT_USRQUOTA 0x00080000
93 #define F2FS_MOUNT_GRPQUOTA 0x00100000
94 #define F2FS_MOUNT_PRJQUOTA 0x00200000
95 #define F2FS_MOUNT_QUOTA 0x00400000
96 #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
97 #define F2FS_MOUNT_RESERVE_ROOT 0x01000000
98 #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
99 #define F2FS_MOUNT_NORECOVERY 0x04000000
100 #define F2FS_MOUNT_ATGC 0x08000000
101 #define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000
102 #define F2FS_MOUNT_GC_MERGE 0x20000000
103 #define F2FS_MOUNT_COMPRESS_CACHE 0x40000000
104 #define F2FS_MOUNT_AGE_EXTENT_CACHE 0x80000000
105
106 #define F2FS_OPTION(sbi) ((sbi)->mount_opt)
107 #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
108 #define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
109 #define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
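/*
 * Example (illustrative only): option-parsing code toggles these bits through
 * the helpers above rather than touching mount_opt.opt directly, e.g.:
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, INLINE_DATA))
 *		clear_opt(sbi, INLINE_DATA);
 */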
110
111 #define ver_after(a, b) (typecheck(unsigned long long, a) && \
112 typecheck(unsigned long long, b) && \
113 ((long long)((a) - (b)) > 0))
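/*
 * Example (illustrative only; cpver_of_node()/cur_cp_version() are the usual
 * helpers and are assumed here): checkpoint versions are compared with
 * wrap-around handled by the signed subtraction above, e.g. a roll-forward
 * recovery style check looks like:
 *
 *	if (ver_after(cpver_of_node(page), cur_cp_version(F2FS_CKPT(sbi))))
 *		return false;
 */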
114
115 typedef u32 block_t; /*
116 * should not change u32, since it is the on-disk block
117 * address format, __le32.
118 */
119 typedef u32 nid_t;
120
121 #define COMPRESS_EXT_NUM 16
122
123 /*
124 * An implementation of an rwsem that is explicitly unfair to readers. This
125 * prevents priority inversion when a low-priority reader acquires the read lock
126 * while sleeping on the write lock but the write lock is needed by
127 * higher-priority clients.
128 */
129
130 struct f2fs_rwsem {
131 struct rw_semaphore internal_rwsem;
132 wait_queue_head_t read_waiters;
133 };
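/*
 * Minimal sketch of the intended locking pattern (illustrative, assuming the
 * wrappers follow the upstream f2fs scheme): readers never queue inside
 * internal_rwsem; they poll with a trylock and sleep on read_waiters, which
 * the write-side unlock wakes, so waiting writers are not starved by a
 * stream of readers.
 *
 *	static inline void example_down_read(struct f2fs_rwsem *sem)
 *	{
 *		wait_event(sem->read_waiters,
 *			   down_read_trylock(&sem->internal_rwsem));
 *	}
 */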
134
135 struct f2fs_mount_info {
136 unsigned int opt;
137 int write_io_size_bits; /* Write IO size bits */
138 block_t root_reserved_blocks; /* root reserved blocks */
139 kuid_t s_resuid; /* reserved blocks for uid */
140 kgid_t s_resgid; /* reserved blocks for gid */
141 int active_logs; /* # of active logs */
142 int inline_xattr_size; /* inline xattr size */
143 #ifdef CONFIG_F2FS_FAULT_INJECTION
144 struct f2fs_fault_info fault_info; /* For fault injection */
145 #endif
146 #ifdef CONFIG_QUOTA
147 /* Names of quota files with journalled quota */
148 char *s_qf_names[MAXQUOTAS];
149 int s_jquota_fmt; /* Format of quota to use */
150 #endif
151 /* For which write hints are passed down to block layer */
152 int whint_mode;
153 int alloc_mode; /* segment allocation policy */
154 int fsync_mode; /* fsync policy */
155 int fs_mode; /* fs mode: LFS or ADAPTIVE */
156 int bggc_mode; /* bggc mode: off, on or sync */
157 int memory_mode; /* memory mode */
158 struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
159 block_t unusable_cap_perc; /* percentage for cap */
160 block_t unusable_cap; /* Amount of space allowed to be
161 * unusable when disabling checkpoint
162 */
163
164 /* For compression */
165 unsigned char compress_algorithm; /* algorithm type */
166 unsigned char compress_log_size; /* cluster log size */
167 unsigned char compress_level; /* compress level */
168 bool compress_chksum; /* compressed data chksum */
169 unsigned char compress_ext_cnt; /* extension count */
170 int compress_mode; /* compression mode */
171 unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
172 };
173
174 #define F2FS_FEATURE_ENCRYPT 0x0001
175 #define F2FS_FEATURE_BLKZONED 0x0002
176 #define F2FS_FEATURE_ATOMIC_WRITE 0x0004
177 #define F2FS_FEATURE_EXTRA_ATTR 0x0008
178 #define F2FS_FEATURE_PRJQUOTA 0x0010
179 #define F2FS_FEATURE_INODE_CHKSUM 0x0020
180 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040
181 #define F2FS_FEATURE_QUOTA_INO 0x0080
182 #define F2FS_FEATURE_INODE_CRTIME 0x0100
183 #define F2FS_FEATURE_LOST_FOUND 0x0200
184 #define F2FS_FEATURE_VERITY 0x0400
185 #define F2FS_FEATURE_SB_CHKSUM 0x0800
186 #define F2FS_FEATURE_CASEFOLD 0x1000
187 #define F2FS_FEATURE_COMPRESSION 0x2000
188 #define F2FS_FEATURE_RO 0x4000
189
190 #define __F2FS_HAS_FEATURE(raw_super, mask) \
191 ((raw_super->feature & cpu_to_le32(mask)) != 0)
192 #define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask)
193 #define F2FS_SET_FEATURE(sbi, mask) \
194 (sbi->raw_super->feature |= cpu_to_le32(mask))
195 #define F2FS_CLEAR_FEATURE(sbi, mask) \
196 (sbi->raw_super->feature &= ~cpu_to_le32(mask))
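/*
 * Example (illustrative only): feature-dependent code paths test the raw
 * superblock bits through these wrappers, e.g.:
 *
 *	bool compressed = F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION);
 */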
197
198 /*
199 * Default values for user and/or group using reserved blocks
200 */
201 #define F2FS_DEF_RESUID 0
202 #define F2FS_DEF_RESGID 0
203
204 /*
205 * For checkpoint manager
206 */
207 enum {
208 NAT_BITMAP,
209 SIT_BITMAP
210 };
211
212 #define CP_UMOUNT 0x00000001
213 #define CP_FASTBOOT 0x00000002
214 #define CP_SYNC 0x00000004
215 #define CP_RECOVERY 0x00000008
216 #define CP_DISCARD 0x00000010
217 #define CP_TRIMMED 0x00000020
218 #define CP_PAUSE 0x00000040
219 #define CP_RESIZE 0x00000080
220
221 #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
222 #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
223 #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */
224 #define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */
225 #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
226 #define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */
227 #define DEF_CP_INTERVAL 60 /* 60 secs */
228 #define DEF_IDLE_INTERVAL 5 /* 5 secs */
229 #define DEF_DISABLE_INTERVAL 5 /* 5 secs */
230 #define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 secs */
231 #define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */
232
233 struct cp_control {
234 int reason;
235 __u64 trim_start;
236 __u64 trim_end;
237 __u64 trim_minlen;
238 };
239
240 /*
241 * indicate meta/data type
242 */
243 enum {
244 META_CP,
245 META_NAT,
246 META_SIT,
247 META_SSA,
248 META_MAX,
249 META_POR,
250 DATA_GENERIC, /* check range only */
251 DATA_GENERIC_ENHANCE, /* strong check on range and segment bitmap */
252 DATA_GENERIC_ENHANCE_READ, /*
253 * strong check on range and segment
254 * bitmap but no warning due to race
255 * condition of read on truncated area
256 * by extent_cache
257 */
258 DATA_GENERIC_ENHANCE_UPDATE, /*
259 * strong check on range and segment
260 * bitmap for update case
261 */
262 META_GENERIC,
263 };
264
265 /* for the list of ino */
266 enum {
267 ORPHAN_INO, /* for orphan ino list */
268 APPEND_INO, /* for append ino list */
269 UPDATE_INO, /* for update ino list */
270 TRANS_DIR_INO, /* for transactions dir ino list */
271 FLUSH_INO, /* for multiple device flushing */
272 MAX_INO_ENTRY, /* max. list */
273 };
274
275 struct ino_entry {
276 struct list_head list; /* list head */
277 nid_t ino; /* inode number */
278 unsigned int dirty_device; /* dirty device bitmap */
279 };
280
281 /* for the list of inodes to be GCed */
282 struct inode_entry {
283 struct list_head list; /* list head */
284 struct inode *inode; /* vfs inode pointer */
285 };
286
287 struct fsync_node_entry {
288 struct list_head list; /* list head */
289 struct page *page; /* warm node page pointer */
290 unsigned int seq_id; /* sequence id */
291 };
292
293 struct ckpt_req {
294 struct completion wait; /* completion for checkpoint done */
295 struct llist_node llnode; /* llist_node to be linked in wait queue */
296 int ret; /* return code of checkpoint */
297 ktime_t queue_time; /* request queued time */
298 };
299
300 struct ckpt_req_control {
301 struct task_struct *f2fs_issue_ckpt; /* checkpoint task */
302 int ckpt_thread_ioprio; /* checkpoint merge thread ioprio */
303 wait_queue_head_t ckpt_wait_queue; /* waiting queue for wake-up */
304 atomic_t issued_ckpt; /* # of actually issued ckpts */
305 atomic_t total_ckpt; /* # of total ckpts */
306 atomic_t queued_ckpt; /* # of queued ckpts */
307 struct llist_head issue_list; /* list for command issue */
308 spinlock_t stat_lock; /* lock for below checkpoint time stats */
309 unsigned int cur_time; /* cur wait time in msec for currently issued checkpoint */
310 unsigned int peak_time; /* peak wait time in msec until now */
311 };
312
313 /* for the bitmap indicate blocks to be discarded */
314 struct discard_entry {
315 struct list_head list; /* list head */
316 block_t start_blkaddr; /* start blockaddr of current segment */
317 unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */
318 };
319
320 /* default discard granularity of inner discard thread, unit: block count */
321 #define DEFAULT_DISCARD_GRANULARITY 16
322
323 /* max discard pend list number */
324 #define MAX_PLIST_NUM 512
325 #define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \
326 (MAX_PLIST_NUM - 1) : ((blk_num) - 1))
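/*
 * Worked example (illustrative): a pending discard of blk_num blocks is
 * filed under pend_list[plist_idx(blk_num)], so 1 block maps to list 0,
 * 16 blocks to list 15, and anything of MAX_PLIST_NUM (512) blocks or more
 * is clamped into the last list, index 511.
 */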
327
328 enum {
329 D_PREP, /* initial */
330 D_PARTIAL, /* partially submitted */
331 D_SUBMIT, /* all submitted */
332 D_DONE, /* finished */
333 };
334
335 struct discard_info {
336 block_t lstart; /* logical start address */
337 block_t len; /* length */
338 block_t start; /* actual start address in dev */
339 };
340
341 struct discard_cmd {
342 struct rb_node rb_node; /* rb node located in rb-tree */
343 union {
344 struct {
345 block_t lstart; /* logical start address */
346 block_t len; /* length */
347 block_t start; /* actual start address in dev */
348 };
349 struct discard_info di; /* discard info */
350
351 };
352 struct list_head list; /* command list */
353 struct completion wait; /* completion */
354 struct block_device *bdev; /* bdev */
355 unsigned short ref; /* reference count */
356 unsigned char state; /* state */
357 unsigned char queued; /* queued discard */
358 int error; /* bio error */
359 spinlock_t lock; /* for state/bio_ref updating */
360 unsigned short bio_ref; /* bio reference count */
361 };
362
363 enum {
364 DPOLICY_BG,
365 DPOLICY_FORCE,
366 DPOLICY_FSTRIM,
367 DPOLICY_UMOUNT,
368 MAX_DPOLICY,
369 };
370
371 struct discard_policy {
372 int type; /* type of discard */
373 unsigned int min_interval; /* used for candidates exist */
374 unsigned int mid_interval; /* used for device busy */
375 unsigned int max_interval; /* used for candidates not exist */
376 unsigned int max_requests; /* # of discards issued per round */
377 unsigned int io_aware_gran; /* minimum granularity at which discards stop being I/O aware */
378 bool io_aware; /* issue discard in idle time */
379 bool sync; /* submit discard with REQ_SYNC flag */
380 bool ordered; /* issue discard by lba order */
381 bool timeout; /* discard timeout for put_super */
382 unsigned int granularity; /* discard granularity */
383 };
384
385 struct discard_cmd_control {
386 struct task_struct *f2fs_issue_discard; /* discard thread */
387 struct list_head entry_list; /* 4KB discard entry list */
388 struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
389 struct list_head wait_list; /* store on-flushing entries */
390 struct list_head fstrim_list; /* in-flight discard from fstrim */
391 wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
392 unsigned int discard_wake; /* to wake up discard thread */
393 struct mutex cmd_lock;
394 unsigned int nr_discards; /* # of discards in the list */
395 unsigned int max_discards; /* max. discards to be issued */
396 unsigned int discard_granularity; /* discard granularity */
397 unsigned int undiscard_blks; /* # of undiscard blocks */
398 unsigned int next_pos; /* next discard position */
399 atomic_t issued_discard; /* # of issued discard */
400 atomic_t queued_discard; /* # of queued discard */
401 atomic_t discard_cmd_cnt; /* # of cached cmd count */
402 struct rb_root_cached root; /* root of discard rb-tree */
403 bool rbtree_check; /* config for consistency check */
404 };
405
406 /* for the list of fsync inodes, used only during recovery */
407 struct fsync_inode_entry {
408 struct list_head list; /* list head */
409 struct inode *inode; /* vfs inode pointer */
410 block_t blkaddr; /* block address locating the last fsync */
411 block_t last_dentry; /* block address locating the last dentry */
412 };
413
414 #define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats))
415 #define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits))
416
417 #define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne)
418 #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid)
419 #define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se)
420 #define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno)
421
422 #define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
423 #define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
424
425 static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
426 {
427 int before = nats_in_cursum(journal);
428
429 journal->n_nats = cpu_to_le16(before + i);
430 return before;
431 }
432
433 static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
434 {
435 int before = sits_in_cursum(journal);
436
437 journal->n_sits = cpu_to_le16(before + i);
438 return before;
439 }
440
441 static inline bool __has_cursum_space(struct f2fs_journal *journal,
442 int size, int type)
443 {
444 if (type == NAT_JOURNAL)
445 return size <= MAX_NAT_JENTRIES(journal);
446 return size <= MAX_SIT_JENTRIES(journal);
447 }
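/*
 * Example (illustrative only): callers reserve journal room before adding
 * entries to the current summary, e.g.:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		update_nats_in_cursum(journal, 1);
 */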
448
449 /* for inline stuff */
450 #define DEF_INLINE_RESERVED_SIZE 1
451 static inline int get_extra_isize(struct inode *inode);
452 static inline int get_inline_xattr_addrs(struct inode *inode);
453 #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
454 (CUR_ADDRS_PER_INODE(inode) - \
455 get_inline_xattr_addrs(inode) - \
456 DEF_INLINE_RESERVED_SIZE))
457
458 /* for inline dir */
459 #define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
460 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
461 BITS_PER_BYTE + 1))
462 #define INLINE_DENTRY_BITMAP_SIZE(inode) \
463 DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
464 #define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \
465 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
466 NR_INLINE_DENTRY(inode) + \
467 INLINE_DENTRY_BITMAP_SIZE(inode)))
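/*
 * Worked example (illustrative, assuming the common layout of 923 in-inode
 * block address slots, 50 inline-xattr slots and no extra attributes):
 * MAX_INLINE_DATA = 4 * (923 - 50 - 1) = 3488 bytes, which gives
 * NR_INLINE_DENTRY = 3488 * 8 / ((11 + 8) * 8 + 1) = 182 inline dentries
 * and a 23-byte inline dentry bitmap.
 */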
468
469 /*
470 * For INODE and NODE manager
471 */
472 /* for directory operations */
473
474 struct f2fs_filename {
475 /*
476 * The filename the user specified. This is NULL for some
477 * filesystem-internal operations, e.g. converting an inline directory
478 * to a non-inline one, or roll-forward recovering an encrypted dentry.
479 */
480 const struct qstr *usr_fname;
481
482 /*
483 * The on-disk filename. For encrypted directories, this is encrypted.
484 * This may be NULL for lookups in an encrypted dir without the key.
485 */
486 struct fscrypt_str disk_name;
487
488 /* The dirhash of this filename */
489 f2fs_hash_t hash;
490
491 #ifdef CONFIG_FS_ENCRYPTION
492 /*
493 * For lookups in encrypted directories: either the buffer backing
494 * disk_name, or a buffer that holds the decoded no-key name.
495 */
496 struct fscrypt_str crypto_buf;
497 #endif
498 #ifdef CONFIG_UNICODE
499 /*
500 * For casefolded directories: the casefolded name, but it's left NULL
501 * if the original name is not valid Unicode, if the original name is
502 * "." or "..", if the directory is both casefolded and encrypted and
503 * its encryption key is unavailable, or if the filesystem is doing an
504 * internal operation where usr_fname is also NULL. In all these cases
505 * we fall back to treating the name as an opaque byte sequence.
506 */
507 struct fscrypt_str cf_name;
508 #endif
509 };
510
511 struct f2fs_dentry_ptr {
512 struct inode *inode;
513 void *bitmap;
514 struct f2fs_dir_entry *dentry;
515 __u8 (*filename)[F2FS_SLOT_LEN];
516 int max;
517 int nr_bitmap;
518 };
519
520 static inline void make_dentry_ptr_block(struct inode *inode,
521 struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
522 {
523 d->inode = inode;
524 d->max = NR_DENTRY_IN_BLOCK;
525 d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
526 d->bitmap = t->dentry_bitmap;
527 d->dentry = t->dentry;
528 d->filename = t->filename;
529 }
530
531 static inline void make_dentry_ptr_inline(struct inode *inode,
532 struct f2fs_dentry_ptr *d, void *t)
533 {
534 int entry_cnt = NR_INLINE_DENTRY(inode);
535 int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
536 int reserved_size = INLINE_RESERVED_SIZE(inode);
537
538 d->inode = inode;
539 d->max = entry_cnt;
540 d->nr_bitmap = bitmap_size;
541 d->bitmap = t;
542 d->dentry = t + bitmap_size + reserved_size;
543 d->filename = t + bitmap_size + reserved_size +
544 SIZE_OF_DIR_ENTRY * entry_cnt;
545 }
546
547 /*
548 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
549 * as its node offset to distinguish from index node blocks.
550 * But some bits are used to mark the node block.
551 */
552 #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
553 >> OFFSET_BIT_SHIFT)
554 enum {
555 ALLOC_NODE, /* allocate a new node page if needed */
556 LOOKUP_NODE, /* look up a node without readahead */
557 LOOKUP_NODE_RA, /*
558 * look up a node with readahead called
559 * by get_data_block.
560 */
561 };
562
563 #define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */
564
565 /* congestion wait timeout value, default: 20ms */
566 #define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20))
567
568 /* maximum retry quota flush count */
569 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
570
571 #define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
572
573 #define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
574
575 /* for in-memory extent cache entry */
576 #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
577
578 /* number of extent info in extent cache we try to shrink */
579 #define READ_EXTENT_CACHE_SHRINK_NUMBER 128
580
581 /* number of age extent info in extent cache we try to shrink */
582 #define AGE_EXTENT_CACHE_SHRINK_NUMBER 128
583 #define LAST_AGE_WEIGHT 30
584 #define SAME_AGE_REGION 1024
585
586 /*
587 * Define data block with age less than 1GB as hot data
588 * define data block with age less than 10GB but more than 1GB as warm data
589 */
590 #define DEF_HOT_DATA_AGE_THRESHOLD 262144
591 #define DEF_WARM_DATA_AGE_THRESHOLD 2621440
592
593 /* extent cache type */
594 enum extent_type {
595 EX_READ,
596 EX_BLOCK_AGE,
597 NR_EXTENT_CACHES,
598 };
599
600 struct rb_entry {
601 struct rb_node rb_node; /* rb node located in rb-tree */
602 union {
603 struct {
604 unsigned int ofs; /* start offset of the entry */
605 unsigned int len; /* length of the entry */
606 };
607 unsigned long long key; /* 64-bits key */
608 } __packed;
609 };
610
611 struct extent_info {
612 unsigned int fofs; /* start offset in a file */
613 unsigned int len; /* length of the extent */
614 union {
615 /* read extent_cache */
616 struct {
617 /* start block address of the extent */
618 block_t blk;
619 #ifdef CONFIG_F2FS_FS_COMPRESSION
620 /* physical extent length of compressed blocks */
621 unsigned int c_len;
622 #endif
623 };
624 /* block age extent_cache */
625 struct {
626 /* block age of the extent */
627 unsigned long long age;
628 /* last total blocks allocated */
629 unsigned long long last_blocks;
630 };
631 };
632 };
633
634 struct extent_node {
635 struct rb_node rb_node; /* rb node located in rb-tree */
636 struct extent_info ei; /* extent info */
637 struct list_head list; /* node in global extent list of sbi */
638 struct extent_tree *et; /* extent tree pointer */
639 };
640
641 struct extent_tree {
642 nid_t ino; /* inode number */
643 enum extent_type type; /* keep the extent tree type */
644 struct rb_root_cached root; /* root of extent info rb-tree */
645 struct extent_node *cached_en; /* recently accessed extent node */
646 struct list_head list; /* to be used by sbi->zombie_list */
647 rwlock_t lock; /* protect extent info rb-tree */
648 atomic_t node_cnt; /* # of extent node in rb-tree*/
649 bool largest_updated; /* largest extent updated */
650 struct extent_info largest; /* largest cached extent for EX_READ */
651 };
652
653 struct extent_tree_info {
654 struct radix_tree_root extent_tree_root;/* cache extent cache entries */
655 struct mutex extent_tree_lock; /* locking extent radix tree */
656 struct list_head extent_list; /* lru list for shrinker */
657 spinlock_t extent_lock; /* locking extent lru list */
658 atomic_t total_ext_tree; /* extent tree count */
659 struct list_head zombie_list; /* extent zombie tree list */
660 atomic_t total_zombie_tree; /* extent zombie tree count */
661 atomic_t total_ext_node; /* extent info count */
662 };
663
664 /*
665 * This structure is taken from ext4_map_blocks.
666 *
667 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
668 */
669 #define F2FS_MAP_NEW (1 << BH_New)
670 #define F2FS_MAP_MAPPED (1 << BH_Mapped)
671 #define F2FS_MAP_UNWRITTEN (1 << BH_Unwritten)
672 #define F2FS_MAP_FLAGS (F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
673 F2FS_MAP_UNWRITTEN)
674
675 struct f2fs_map_blocks {
676 block_t m_pblk;
677 block_t m_lblk;
678 unsigned int m_len;
679 unsigned int m_flags;
680 pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
681 pgoff_t *m_next_extent; /* point to next possible extent */
682 int m_seg_type;
683 bool m_may_create; /* indicate it is from write path */
684 };
685
686 /* for flag in get_data_block */
687 enum {
688 F2FS_GET_BLOCK_DEFAULT,
689 F2FS_GET_BLOCK_FIEMAP,
690 F2FS_GET_BLOCK_BMAP,
691 F2FS_GET_BLOCK_DIO,
692 F2FS_GET_BLOCK_PRE_DIO,
693 F2FS_GET_BLOCK_PRE_AIO,
694 F2FS_GET_BLOCK_PRECACHE,
695 };
696
697 /*
698 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
699 */
700 #define FADVISE_COLD_BIT 0x01
701 #define FADVISE_LOST_PINO_BIT 0x02
702 #define FADVISE_ENCRYPT_BIT 0x04
703 #define FADVISE_ENC_NAME_BIT 0x08
704 #define FADVISE_KEEP_SIZE_BIT 0x10
705 #define FADVISE_HOT_BIT 0x20
706 #define FADVISE_VERITY_BIT 0x40
707
708 #define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
709
710 #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
711 #define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT)
712 #define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT)
713
714 #define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
715 #define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT)
716 #define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT)
717
718 #define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT)
719 #define file_set_encrypt(inode) set_file(inode, FADVISE_ENCRYPT_BIT)
720
721 #define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT)
722 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
723
724 #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
725 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
726
727 #define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT)
728 #define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT)
729 #define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT)
730
731 #define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT)
732 #define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT)
733
734 #define DEF_DIR_LEVEL 0
735
736 enum {
737 GC_FAILURE_PIN,
738 GC_FAILURE_ATOMIC,
739 MAX_GC_FAILURE
740 };
741
742 /* used for f2fs_inode_info->flags */
743 enum {
744 FI_NEW_INODE, /* indicate newly allocated inode */
745 FI_DIRTY_INODE, /* indicate inode is dirty or not */
746 FI_AUTO_RECOVER, /* indicate inode is recoverable */
747 FI_DIRTY_DIR, /* indicate directory has dirty pages */
748 FI_INC_LINK, /* need to increment i_nlink */
749 FI_ACL_MODE, /* indicate acl mode */
750 FI_NO_ALLOC, /* should not allocate any blocks */
751 FI_FREE_NID, /* free allocated nid */
752 FI_NO_EXTENT, /* not to use the extent cache */
753 FI_INLINE_XATTR, /* used for inline xattr */
754 FI_INLINE_DATA, /* used for inline data*/
755 FI_INLINE_DENTRY, /* used for inline dentry */
756 FI_APPEND_WRITE, /* inode has appended data */
757 FI_UPDATE_WRITE, /* inode has in-place-update data */
758 FI_NEED_IPU, /* used for ipu per file */
759 FI_ATOMIC_FILE, /* indicate atomic file */
760 FI_ATOMIC_COMMIT, /* indicate the state of atomic committing */
761 FI_VOLATILE_FILE, /* indicate volatile file */
762 FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
763 FI_DROP_CACHE, /* drop dirty page cache */
764 FI_DATA_EXIST, /* indicate data exists */
765 FI_INLINE_DOTS, /* indicate inline dot dentries */
766 FI_SKIP_WRITES, /* should skip data page writeback */
767 FI_OPU_WRITE, /* used for opu per file */
768 FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
769 FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
770 FI_HOT_DATA, /* indicate file is hot */
771 FI_EXTRA_ATTR, /* indicate file has extra attribute */
772 FI_PROJ_INHERIT, /* indicate file inherits projectid */
773 FI_PIN_FILE, /* indicate file should not be gced */
774 FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
775 FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
776 FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
777 FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */
778 FI_MMAP_FILE, /* indicate file was mmapped */
779 FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */
780 FI_COMPRESS_RELEASED, /* compressed blocks were released */
781 FI_ALIGNED_WRITE, /* enable aligned write */
782 FI_MAX, /* max flag, never be used */
783 };
784
785 struct f2fs_inode_info {
786 struct inode vfs_inode; /* serve a vfs inode */
787 unsigned long i_flags; /* keep an inode flags for ioctl */
788 unsigned char i_advise; /* use to give file attribute hints */
789 unsigned char i_dir_level; /* use for dentry level for large dir */
790 unsigned int i_current_depth; /* only for directory depth */
791 /* for gc failure statistic */
792 unsigned int i_gc_failures[MAX_GC_FAILURE];
793 unsigned int i_pino; /* parent inode number */
794 umode_t i_acl_mode; /* keep file acl mode temporarily */
795
796 /* Use below internally in f2fs*/
797 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
798 struct f2fs_rwsem i_sem; /* protect fi info */
799 atomic_t dirty_pages; /* # of dirty pages */
800 f2fs_hash_t chash; /* hash value of given file name */
801 unsigned int clevel; /* maximum level of given file name */
802 struct task_struct *task; /* lookup and create consistency */
803 struct task_struct *cp_task; /* separate cp/wb IO stats*/
804 struct task_struct *wb_task; /* indicate inode is in context of writeback */
805 nid_t i_xattr_nid; /* node id that contains xattrs */
806 loff_t last_disk_size; /* lastly written file size */
807 spinlock_t i_size_lock; /* protect last_disk_size */
808
809 #ifdef CONFIG_QUOTA
810 struct dquot *i_dquot[MAXQUOTAS];
811
812 /* quota space reservation, managed internally by quota code */
813 qsize_t i_reserved_quota;
814 #endif
815 struct list_head dirty_list; /* dirty list for dirs and files */
816 struct list_head gdirty_list; /* linked in global dirty list */
817 struct list_head inmem_ilist; /* list for inmem inodes */
818 struct list_head inmem_pages; /* inmemory pages managed by f2fs */
819 struct task_struct *inmem_task; /* store inmemory task */
820 struct mutex inmem_lock; /* lock for inmemory pages */
821 struct extent_tree *extent_tree[NR_EXTENT_CACHES];
822 /* cached extent_tree entry */
823
824 /* avoid racing between foreground op and gc */
825 struct f2fs_rwsem i_gc_rwsem[2];
826 struct f2fs_rwsem i_mmap_sem;
827 struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */
828
829 int i_extra_isize; /* size of extra space located in i_addr */
830 kprojid_t i_projid; /* id for project quota */
831 int i_inline_xattr_size; /* inline xattr size */
832 struct timespec64 i_crtime; /* inode creation time */
833 struct timespec64 i_disk_time[4];/* inode disk times */
834
835 /* for file compress */
836 atomic_t i_compr_blocks; /* # of compressed blocks */
837 unsigned char i_compress_algorithm; /* algorithm type */
838 unsigned char i_log_cluster_size; /* log of cluster size */
839 unsigned char i_compress_level; /* compress level (lz4hc,zstd) */
840 unsigned short i_compress_flag; /* compress flag */
841 unsigned int i_cluster_size; /* cluster size */
842 };
843
844 static inline void get_read_extent_info(struct extent_info *ext,
845 struct f2fs_extent *i_ext)
846 {
847 ext->fofs = le32_to_cpu(i_ext->fofs);
848 ext->blk = le32_to_cpu(i_ext->blk);
849 ext->len = le32_to_cpu(i_ext->len);
850 }
851
852 static inline void set_raw_read_extent(struct extent_info *ext,
853 struct f2fs_extent *i_ext)
854 {
855 i_ext->fofs = cpu_to_le32(ext->fofs);
856 i_ext->blk = cpu_to_le32(ext->blk);
857 i_ext->len = cpu_to_le32(ext->len);
858 }
859
860 static inline bool __is_discard_mergeable(struct discard_info *back,
861 struct discard_info *front, unsigned int max_len)
862 {
863 return (back->lstart + back->len == front->lstart) &&
864 (back->len + front->len <= max_len);
865 }
866
867 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
868 struct discard_info *back, unsigned int max_len)
869 {
870 return __is_discard_mergeable(back, cur, max_len);
871 }
872
873 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
874 struct discard_info *front, unsigned int max_len)
875 {
876 return __is_discard_mergeable(cur, front, max_len);
877 }
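/*
 * Example (illustrative only): two pending discards merge when they are
 * logically contiguous and the merged length fits within max_len, e.g.
 * back = {lstart 100, len 28} and front = {lstart 128, len 36} merge into a
 * single 64-block command (for max_len >= 64).
 */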
878
879 /*
880 * For free nid management
881 */
882 enum nid_state {
883 FREE_NID, /* newly added to free nid list */
884 PREALLOC_NID, /* it is preallocated */
885 MAX_NID_STATE,
886 };
887
888 enum nat_state {
889 TOTAL_NAT,
890 DIRTY_NAT,
891 RECLAIMABLE_NAT,
892 MAX_NAT_STATE,
893 };
894
895 struct f2fs_nm_info {
896 block_t nat_blkaddr; /* base disk address of NAT */
897 nid_t max_nid; /* maximum possible node ids */
898 nid_t available_nids; /* # of available node ids */
899 nid_t next_scan_nid; /* the next nid to be scanned */
900 unsigned int ram_thresh; /* control the memory footprint */
901 unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */
902 unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */
903
904 /* NAT cache management */
905 struct radix_tree_root nat_root;/* root of the nat entry cache */
906 struct radix_tree_root nat_set_root;/* root of the nat set cache */
907 struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */
908 struct list_head nat_entries; /* cached nat entry list (clean) */
909 spinlock_t nat_list_lock; /* protect clean nat entry list */
910 unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
911 unsigned int nat_blocks; /* # of nat blocks */
912
913 /* free node ids management */
914 struct radix_tree_root free_nid_root;/* root of the free_nid cache */
915 struct list_head free_nid_list; /* list for free nids excluding preallocated nids */
916 unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */
917 spinlock_t nid_list_lock; /* protect nid lists ops */
918 struct mutex build_lock; /* lock for build free nids */
919 unsigned char **free_nid_bitmap;
920 unsigned char *nat_block_bitmap;
921 unsigned short *free_nid_count; /* free nid count of NAT block */
922
923 /* for checkpoint */
924 char *nat_bitmap; /* NAT bitmap pointer */
925
926 unsigned int nat_bits_blocks; /* # of nat bits blocks */
927 unsigned char *nat_bits; /* NAT bits blocks */
928 unsigned char *full_nat_bits; /* full NAT pages */
929 unsigned char *empty_nat_bits; /* empty NAT pages */
930 #ifdef CONFIG_F2FS_CHECK_FS
931 char *nat_bitmap_mir; /* NAT bitmap mirror */
932 #endif
933 int bitmap_size; /* bitmap size */
934 };
935
936 /*
937 * this structure is used as one of function parameters.
938 * all the information is dedicated to a given direct node block determined
939 * by the data offset in a file.
940 */
941 struct dnode_of_data {
942 struct inode *inode; /* vfs inode pointer */
943 struct page *inode_page; /* its inode page, NULL is possible */
944 struct page *node_page; /* cached direct node page */
945 nid_t nid; /* node id of the direct node block */
946 unsigned int ofs_in_node; /* data offset in the node page */
947 bool inode_page_locked; /* inode page is locked or not */
948 bool node_changed; /* is node block changed */
949 char cur_level; /* level of hole node page */
950 char max_level; /* level of current page located */
951 block_t data_blkaddr; /* block address of the node block */
952 };
953
954 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
955 struct page *ipage, struct page *npage, nid_t nid)
956 {
957 memset(dn, 0, sizeof(*dn));
958 dn->inode = inode;
959 dn->inode_page = ipage;
960 dn->node_page = npage;
961 dn->nid = nid;
962 }
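/*
 * Example (illustrative only): the typical lookup pattern zeroes a dnode on
 * the stack, points it at the inode, and then resolves the target block:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	...
 *	f2fs_put_dnode(&dn);
 */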
963
964 /*
965 * For SIT manager
966 *
967 * By default, there are 6 active log areas across the whole main area.
968 * When considering hot and cold data separation to reduce cleaning overhead,
969 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
970 * respectively.
971 * In the current design, you should not change the numbers intentionally.
972 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
973 * logs individually according to the underlying devices. (default: 6)
974 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
975 * data and 8 for node logs.
976 */
977 #define NR_CURSEG_DATA_TYPE (3)
978 #define NR_CURSEG_NODE_TYPE (3)
979 #define NR_CURSEG_INMEM_TYPE (2)
980 #define NR_CURSEG_RO_TYPE (2)
981 #define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
982 #define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
983
984 enum {
985 CURSEG_HOT_DATA = 0, /* directory entry blocks */
986 CURSEG_WARM_DATA, /* data blocks */
987 CURSEG_COLD_DATA, /* multimedia or GCed data blocks */
988 CURSEG_HOT_NODE, /* direct node blocks of directory files */
989 CURSEG_WARM_NODE, /* direct node blocks of normal files */
990 CURSEG_COLD_NODE, /* indirect node blocks */
991 NR_PERSISTENT_LOG, /* number of persistent log */
992 CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
993 /* pinned file that needs consecutive block address */
994 CURSEG_ALL_DATA_ATGC, /* SSR allocator in hot/warm/cold data area */
995 NO_CHECK_TYPE, /* number of persistent & inmem log */
996 };
997
998 struct flush_cmd {
999 struct completion wait;
1000 struct llist_node llnode;
1001 nid_t ino;
1002 int ret;
1003 };
1004
1005 struct flush_cmd_control {
1006 struct task_struct *f2fs_issue_flush; /* flush thread */
1007 wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
1008 atomic_t issued_flush; /* # of issued flushes */
1009 atomic_t queued_flush; /* # of queued flushes */
1010 struct llist_head issue_list; /* list for command issue */
1011 struct llist_node *dispatch_list; /* list for command dispatch */
1012 };
1013
1014 struct f2fs_sm_info {
1015 struct sit_info *sit_info; /* whole segment information */
1016 struct free_segmap_info *free_info; /* free segment information */
1017 struct dirty_seglist_info *dirty_info; /* dirty segment information */
1018 struct curseg_info *curseg_array; /* active segment information */
1019
1020 struct f2fs_rwsem curseg_lock; /* for preventing curseg change */
1021
1022 block_t seg0_blkaddr; /* block address of 0'th segment */
1023 block_t main_blkaddr; /* start block address of main area */
1024 block_t ssa_blkaddr; /* start block address of SSA area */
1025
1026 unsigned int segment_count; /* total # of segments */
1027 unsigned int main_segments; /* # of segments in main area */
1028 unsigned int reserved_segments; /* # of reserved segments */
1029 unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
1030 unsigned int ovp_segments; /* # of overprovision segments */
1031
1032 /* a threshold to reclaim prefree segments */
1033 unsigned int rec_prefree_segments;
1034
1035 /* for batched trimming */
1036 unsigned int trim_sections; /* # of sections to trim */
1037
1038 struct list_head sit_entry_set; /* sit entry set list */
1039
1040 unsigned int ipu_policy; /* in-place-update policy */
1041 unsigned int min_ipu_util; /* in-place-update threshold */
1042 unsigned int min_fsync_blocks; /* threshold for fsync */
1043 unsigned int min_seq_blocks; /* threshold for sequential blocks */
1044 unsigned int min_hot_blocks; /* threshold for hot block allocation */
1045 unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */
1046
1047 /* for flush command control */
1048 struct flush_cmd_control *fcc_info;
1049
1050 /* for discard command control */
1051 struct discard_cmd_control *dcc_info;
1052 };
1053
1054 /*
1055 * For superblock
1056 */
1057 /*
1058 * COUNT_TYPE for monitoring
1059 *
1060 * f2fs monitors the number of several block types such as on-writeback,
1061 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
1062 */
1063 #define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
1064 enum count_type {
1065 F2FS_DIRTY_DENTS,
1066 F2FS_DIRTY_DATA,
1067 F2FS_DIRTY_QDATA,
1068 F2FS_DIRTY_NODES,
1069 F2FS_DIRTY_META,
1070 F2FS_INMEM_PAGES,
1071 F2FS_DIRTY_IMETA,
1072 F2FS_WB_CP_DATA,
1073 F2FS_WB_DATA,
1074 F2FS_RD_DATA,
1075 F2FS_RD_NODE,
1076 F2FS_RD_META,
1077 F2FS_DIO_WRITE,
1078 F2FS_DIO_READ,
1079 NR_COUNT_TYPE,
1080 };
1081
1082 /*
1083 * The below are the page types of bios used in submit_bio().
1084 * The available types are:
1085 * DATA User data pages. It operates in async mode.
1086 * NODE Node pages. It operates in async mode.
1087 * META FS metadata pages such as SIT, NAT, CP.
1088 * NR_PAGE_TYPE The number of page types.
1089 * META_FLUSH Make sure the previous pages are written
1090 * while waiting for the bio's completion.
1091 * ... Can only be used with META.
1092 */
1093 #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
1094 enum page_type {
1095 DATA = 0,
1096 NODE = 1, /* should not change this */
1097 META,
1098 NR_PAGE_TYPE,
1099 META_FLUSH,
1100 INMEM, /* the below types are used by tracepoints only. */
1101 INMEM_DROP,
1102 INMEM_INVALIDATE,
1103 INMEM_REVOKE,
1104 IPU,
1105 OPU,
1106 };
1107
1108 enum temp_type {
1109 HOT = 0, /* must be zero for meta bio */
1110 WARM,
1111 COLD,
1112 NR_TEMP_TYPE,
1113 };
1114
1115 enum need_lock_type {
1116 LOCK_REQ = 0,
1117 LOCK_DONE,
1118 LOCK_RETRY,
1119 };
1120
1121 enum cp_reason_type {
1122 CP_NO_NEEDED,
1123 CP_NON_REGULAR,
1124 CP_COMPRESSED,
1125 CP_HARDLINK,
1126 CP_SB_NEED_CP,
1127 CP_WRONG_PINO,
1128 CP_NO_SPC_ROLL,
1129 CP_NODE_NEED_CP,
1130 CP_FASTBOOT_MODE,
1131 CP_SPEC_LOG_NUM,
1132 CP_RECOVER_DIR,
1133 };
1134
1135 enum iostat_type {
1136 /* WRITE IO */
1137 APP_DIRECT_IO, /* app direct write IOs */
1138 APP_BUFFERED_IO, /* app buffered write IOs */
1139 APP_WRITE_IO, /* app write IOs */
1140 APP_MAPPED_IO, /* app mapped IOs */
1141 FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */
1142 FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */
1143 FS_META_IO, /* meta IOs from kworker/reclaimer */
1144 FS_GC_DATA_IO, /* data IOs from foreground gc */
1145 FS_GC_NODE_IO, /* node IOs from foreground gc */
1146 FS_CP_DATA_IO, /* data IOs from checkpoint */
1147 FS_CP_NODE_IO, /* node IOs from checkpoint */
1148 FS_CP_META_IO, /* meta IOs from checkpoint */
1149
1150 /* READ IO */
1151 APP_DIRECT_READ_IO, /* app direct read IOs */
1152 APP_BUFFERED_READ_IO, /* app buffered read IOs */
1153 APP_READ_IO, /* app read IOs */
1154 APP_MAPPED_READ_IO, /* app mapped read IOs */
1155 FS_DATA_READ_IO, /* data read IOs */
1156 FS_GDATA_READ_IO, /* data read IOs from background gc */
1157 FS_CDATA_READ_IO, /* compressed data read IOs */
1158 FS_NODE_READ_IO, /* node read IOs */
1159 FS_META_READ_IO, /* meta read IOs */
1160
1161 /* other */
1162 FS_DISCARD, /* discard */
1163 NR_IO_TYPE,
1164 };
1165
1166 struct f2fs_io_info {
1167 struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
1168 nid_t ino; /* inode number */
1169 enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
1170 enum temp_type temp; /* contains HOT/WARM/COLD */
1171 int op; /* contains REQ_OP_ */
1172 int op_flags; /* req_flag_bits */
1173 block_t new_blkaddr; /* new block address to be written */
1174 block_t old_blkaddr; /* old block address before Cow */
1175 struct page *page; /* page to be written */
1176 struct page *encrypted_page; /* encrypted page */
1177 struct page *compressed_page; /* compressed page */
1178 struct list_head list; /* serialize IOs */
1179 bool submitted; /* indicate IO submission */
1180 int need_lock; /* indicate we need to lock cp_rwsem */
1181 bool in_list; /* indicate fio is in io_list */
1182 bool is_por; /* indicate IO is from recovery or not */
1183 bool retry; /* need to reallocate block address */
1184 int compr_blocks; /* # of compressed block addresses */
1185 bool encrypted; /* indicate file is encrypted */
1186 bool post_read; /* require post read */
1187 enum iostat_type io_type; /* io type */
1188 struct writeback_control *io_wbc; /* writeback control */
1189 struct bio **bio; /* bio for ipu */
1190 sector_t *last_block; /* last block number in bio */
1191 unsigned char version; /* version of the node */
1192 };
1193
1194 struct bio_entry {
1195 struct bio *bio;
1196 struct list_head list;
1197 };
1198
1199 #define is_read_io(rw) ((rw) == READ)
1200 struct f2fs_bio_info {
1201 struct f2fs_sb_info *sbi; /* f2fs superblock */
1202 struct bio *bio; /* bios to merge */
1203 sector_t last_block_in_bio; /* last block number */
1204 struct f2fs_io_info fio; /* store buffered io info. */
1205 struct f2fs_rwsem io_rwsem; /* blocking op for bio */
1206 spinlock_t io_lock; /* serialize DATA/NODE IOs */
1207 struct list_head io_list; /* track fios */
1208 struct list_head bio_list; /* bio entry list head */
1209 struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */
1210 };
1211
1212 #define FDEV(i) (sbi->devs[i])
1213 #define RDEV(i) (raw_super->devs[i])
1214 struct f2fs_dev_info {
1215 struct block_device *bdev;
1216 char path[MAX_PATH_LEN];
1217 unsigned int total_segments;
1218 block_t start_blk;
1219 block_t end_blk;
1220 #ifdef CONFIG_BLK_DEV_ZONED
1221 unsigned int nr_blkz; /* Total number of zones */
1222 unsigned long *blkz_seq; /* Bitmap indicating sequential zones */
1223 block_t *zone_capacity_blocks; /* Array of zone capacity in blks */
1224 #endif
1225 };
1226
1227 enum inode_type {
1228 DIR_INODE, /* for dirty dir inode */
1229 FILE_INODE, /* for dirty regular/symlink inode */
1230 DIRTY_META, /* for all dirtied inode metadata */
1231 ATOMIC_FILE, /* for all atomic files */
1232 NR_INODE_TYPE,
1233 };
1234
1235 /* for inner inode cache management */
1236 struct inode_management {
1237 struct radix_tree_root ino_root; /* ino entry array */
1238 spinlock_t ino_lock; /* for ino entry lock */
1239 struct list_head ino_list; /* inode list head */
1240 unsigned long ino_num; /* number of entries */
1241 };
1242
1243 /* for GC_AT */
1244 struct atgc_management {
1245 bool atgc_enabled; /* ATGC is enabled or not */
1246 struct rb_root_cached root; /* root of victim rb-tree */
1247 struct list_head victim_list; /* linked with all victim entries */
1248 unsigned int victim_count; /* victim count in rb-tree */
1249 unsigned int candidate_ratio; /* candidate ratio */
1250 unsigned int max_candidate_count; /* max candidate count */
1251 unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */
1252 unsigned long long age_threshold; /* age threshold */
1253 };
1254
1255 /* For s_flag in struct f2fs_sb_info */
1256 enum {
1257 SBI_IS_DIRTY, /* dirty flag for checkpoint */
1258 SBI_IS_CLOSE, /* specify unmounting */
1259 SBI_NEED_FSCK, /* need fsck.f2fs to fix */
1260 SBI_POR_DOING, /* recovery is doing or not */
1261 SBI_NEED_SB_WRITE, /* need to recover superblock */
1262 SBI_NEED_CP, /* need to checkpoint */
1263 SBI_IS_SHUTDOWN, /* shutdown by ioctl */
1264 SBI_IS_RECOVERED, /* recovered orphan/data */
1265 SBI_CP_DISABLED, /* CP was disabled last mount */
1266 SBI_CP_DISABLED_QUICK, /* CP was disabled quickly */
1267 SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */
1268 SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
1269 SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
1270 SBI_IS_RESIZEFS, /* resizefs is in process */
1271 SBI_IS_FREEZING, /* freezefs is in process */
1272 };
1273
1274 enum {
1275 CP_TIME,
1276 REQ_TIME,
1277 DISCARD_TIME,
1278 GC_TIME,
1279 DISABLE_TIME,
1280 UMOUNT_DISCARD_TIMEOUT,
1281 MAX_TIME,
1282 };
1283
1284 enum {
1285 GC_NORMAL,
1286 GC_IDLE_CB,
1287 GC_IDLE_GREEDY,
1288 GC_IDLE_AT,
1289 GC_URGENT_HIGH,
1290 GC_URGENT_LOW,
1291 MAX_GC_MODE,
1292 };
1293
1294 enum {
1295 BGGC_MODE_ON, /* background gc is on */
1296 BGGC_MODE_OFF, /* background gc is off */
1297 BGGC_MODE_SYNC, /*
1298 * background gc is on, migrating blocks
1299 * like foreground gc
1300 */
1301 };
1302
1303 enum {
1304 FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
1305 FS_MODE_LFS, /* use lfs allocation only */
1306 };
1307
1308 enum {
1309 WHINT_MODE_OFF, /* not pass down write hints */
1310 WHINT_MODE_USER, /* try to pass down hints given by users */
1311 WHINT_MODE_FS, /* pass down hints with F2FS policy */
1312 };
1313
1314 enum {
1315 ALLOC_MODE_DEFAULT, /* stay default */
1316 ALLOC_MODE_REUSE, /* reuse segments as much as possible */
1317 };
1318
1319 enum fsync_mode {
1320 FSYNC_MODE_POSIX, /* fsync follows posix semantics */
1321 FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */
1322 FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */
1323 };
1324
1325 enum {
1326 COMPR_MODE_FS, /*
1327 * automatically compress compression
1328 * enabled files
1329 */
1330 COMPR_MODE_USER, /*
1331 * automatic compression is disabled.
1332 * user can control the file compression
1333 * using ioctls
1334 */
1335 };
1336
1337 enum {
1338 MEMORY_MODE_NORMAL, /* memory mode for normal devices */
1339 MEMORY_MODE_LOW, /* memory mode for low memory devices */
1340 };
1341
1342 static inline int f2fs_test_bit(unsigned int nr, char *addr);
1343 static inline void f2fs_set_bit(unsigned int nr, char *addr);
1344 static inline void f2fs_clear_bit(unsigned int nr, char *addr);
1345
1346 /*
1347 * Layout of f2fs page.private:
1348 *
1349 * Layout A: lowest bit should be 1
1350 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
1351 * bit 0 PAGE_PRIVATE_NOT_POINTER
1352 * bit 1 PAGE_PRIVATE_ATOMIC_WRITE
1353 * bit 2 PAGE_PRIVATE_DUMMY_WRITE
1354 * bit 3 PAGE_PRIVATE_ONGOING_MIGRATION
1355 * bit 4 PAGE_PRIVATE_INLINE_INODE
1356 * bit 5 PAGE_PRIVATE_REF_RESOURCE
1357 * bit 6- f2fs private data
1358 *
1359 * Layout B: lowest bit should be 0
1360 * page.private is a wrapped pointer.
1361 */
1362 enum {
1363 PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
1364 PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
1365 PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
1366 PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
1367 PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
1368 PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
1369 PAGE_PRIVATE_MAX
1370 };
1371
1372 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
1373 static inline bool page_private_##name(struct page *page) \
1374 { \
1375 return PagePrivate(page) && \
1376 test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
1377 test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
1378 }
1379
1380 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
1381 static inline void set_page_private_##name(struct page *page) \
1382 { \
1383 if (!PagePrivate(page)) { \
1384 get_page(page); \
1385 SetPagePrivate(page); \
1386 set_page_private(page, 0); \
1387 } \
1388 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
1389 set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
1390 }
1391
1392 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
1393 static inline void clear_page_private_##name(struct page *page) \
1394 { \
1395 clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
1396 if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
1397 set_page_private(page, 0); \
1398 if (PagePrivate(page)) { \
1399 ClearPagePrivate(page); \
1400 put_page(page); \
1401 }\
1402 } \
1403 }
1404
1405 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
1406 PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
1407 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
1408 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
1409 PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
1410 PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
1411
1412 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
1413 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
1414 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
1415 PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
1416 PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
1417
1418 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
1419 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
1420 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
1421 PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
1422 PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
1423
1424 static inline unsigned long get_page_private_data(struct page *page)
1425 {
1426 unsigned long data = page_private(page);
1427
1428 if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
1429 return 0;
1430 return data >> PAGE_PRIVATE_MAX;
1431 }
1432
1433 static inline void set_page_private_data(struct page *page, unsigned long data)
1434 {
1435 if (!PagePrivate(page)) {
1436 get_page(page);
1437 SetPagePrivate(page);
1438 set_page_private(page, 0);
1439 }
1440 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
1441 page_private(page) |= data << PAGE_PRIVATE_MAX;
1442 }
1443
1444 static inline void clear_page_private_data(struct page *page)
1445 {
1446 page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
1447 if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
1448 set_page_private(page, 0);
1449 if (PagePrivate(page)) {
1450 ClearPagePrivate(page);
1451 put_page(page);
1452 }
1453 }
1454 }
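/*
 * Worked example (illustrative): with PAGE_PRIVATE_MAX == 6,
 * set_page_private_data(page, 3) stores (3 << 6) | 1 = 0xc1 in page.private:
 * bit 0 marks the value as non-pointer data, and get_page_private_data()
 * recovers 3 by shifting the flag bits back out.
 */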
1455
1456 /* For compression */
1457 enum compress_algorithm_type {
1458 COMPRESS_LZO,
1459 COMPRESS_LZ4,
1460 COMPRESS_ZSTD,
1461 COMPRESS_LZORLE,
1462 COMPRESS_MAX,
1463 };
1464
1465 enum compress_flag {
1466 COMPRESS_CHKSUM,
1467 COMPRESS_MAX_FLAG,
1468 };
1469
1470 #define COMPRESS_WATERMARK 20
1471 #define COMPRESS_PERCENT 20
1472
1473 #define COMPRESS_DATA_RESERVED_SIZE 4
1474 struct compress_data {
1475 __le32 clen; /* compressed data size */
1476 __le32 chksum; /* compressed data chksum */
1477 __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
1478 u8 cdata[]; /* compressed data */
1479 };
1480
1481 #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data))
1482
1483 #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
1484
1485 #define COMPRESS_LEVEL_OFFSET 8
1486
1487 /* compress context */
1488 struct compress_ctx {
1489 struct inode *inode; /* inode the context belong to */
1490 pgoff_t cluster_idx; /* cluster index number */
1491 unsigned int cluster_size; /* page count in cluster */
1492 unsigned int log_cluster_size; /* log of cluster size */
1493 struct page **rpages; /* pages store raw data in cluster */
1494 unsigned int nr_rpages; /* total page number in rpages */
1495 struct page **cpages; /* pages store compressed data in cluster */
1496 unsigned int nr_cpages; /* total page number in cpages */
1497 void *rbuf; /* virtual mapped address on rpages */
1498 struct compress_data *cbuf; /* virtual mapped address on cpages */
1499 size_t rlen; /* valid data length in rbuf */
1500 size_t clen; /* valid data length in cbuf */
1501 void *private; /* payload buffer for specified compression algorithm */
1502 void *private2; /* extra payload buffer */
1503 };
1504
1505 /* compress context for write IO path */
1506 struct compress_io_ctx {
1507 u32 magic; /* magic number to indicate page is compressed */
1508 struct inode *inode; /* inode the context belong to */
1509 struct page **rpages; /* pages store raw data in cluster */
1510 unsigned int nr_rpages; /* total page number in rpages */
1511 atomic_t pending_pages; /* in-flight compressed page count */
1512 };
1513
1514 /* Context for decompressing one cluster on the read IO path */
1515 struct decompress_io_ctx {
1516 u32 magic; /* magic number to indicate page is compressed */
1517 struct inode *inode; /* inode the context belong to */
1518 pgoff_t cluster_idx; /* cluster index number */
1519 unsigned int cluster_size; /* page count in cluster */
1520 unsigned int log_cluster_size; /* log of cluster size */
1521 struct page **rpages; /* pages store raw data in cluster */
1522 unsigned int nr_rpages; /* total page number in rpages */
1523 struct page **cpages; /* pages store compressed data in cluster */
1524 unsigned int nr_cpages; /* total page number in cpages */
1525 struct page **tpages; /* temp pages to pad holes in cluster */
1526 void *rbuf; /* virtual mapped address on rpages */
1527 struct compress_data *cbuf; /* virtual mapped address on cpages */
1528 size_t rlen; /* valid data length in rbuf */
1529 size_t clen; /* valid data length in cbuf */
1530
1531 /*
1532 * The number of compressed pages remaining to be read in this cluster.
1533 * This is initially nr_cpages. It is decremented by 1 each time a page
1534 * has been read (or failed to be read). When it reaches 0, the cluster
1535 * is decompressed (or an error is reported).
1536 *
1537 * If an error occurs before all the pages have been submitted for I/O,
1538 * then this will never reach 0. In this case the I/O submitter is
1539 * responsible for calling f2fs_decompress_end_io() instead.
1540 */
1541 atomic_t remaining_pages;
1542
1543 /*
1544 * Number of references to this decompress_io_ctx.
1545 *
1546 * One reference is held for I/O completion. This reference is dropped
1547 * after the pagecache pages are updated and unlocked -- either after
1548 * decompression (and verity if enabled), or after an error.
1549 *
1550 * In addition, each compressed page holds a reference while it is in a
1551 * bio. These references are necessary to prevent compressed pages from
1552 * being freed while they are still in a bio.
1553 */
1554 refcount_t refcnt;
1555
1556 bool failed; /* IO error occurred before decompression? */
1557 bool need_verity; /* need fs-verity verification after decompression? */
1558 void *private; /* payload buffer for specified decompression algorithm */
1559 void *private2; /* extra payload buffer */
1560 struct work_struct verity_work; /* work to verify the decompressed pages */
1561 struct work_struct free_work; /* work to free this structure later */
1562 };
1563
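/*
 * Illustrative sketch of the accounting described above (the helper names
 * here are hypothetical): each compressed page completing its read drops
 * remaining_pages, and the last completion triggers decompression, while
 * refcnt keeps the context alive as long as cpages sit in bios.
 *
 *	static void example_read_end_io(struct decompress_io_ctx *dic,
 *					bool failed)
 *	{
 *		if (failed)
 *			dic->failed = true;
 *		if (atomic_dec_and_test(&dic->remaining_pages))
 *			example_decompress_cluster(dic);
 *	}
 */
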
1564 #define NULL_CLUSTER ((unsigned int)(~0))
1565 #define MIN_COMPRESS_LOG_SIZE 2
1566 #define MAX_COMPRESS_LOG_SIZE 8
1567 #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size))
1568
1569 struct f2fs_sb_info {
1570 struct super_block *sb; /* pointer to VFS super block */
1571 struct proc_dir_entry *s_proc; /* proc entry */
1572 struct f2fs_super_block *raw_super; /* raw super block pointer */
1573 struct f2fs_rwsem sb_lock; /* lock for raw super block */
1574 int valid_super_block; /* valid super block no */
1575 unsigned long s_flag; /* flags for sbi */
1576 struct mutex writepages; /* mutex for writepages() */
1577
1578 #ifdef CONFIG_BLK_DEV_ZONED
1579 unsigned int blocks_per_blkz; /* F2FS blocks per zone */
1580 unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
1581 #endif
1582
1583 /* for node-related operations */
1584 struct f2fs_nm_info *nm_info; /* node manager */
1585 struct inode *node_inode; /* cache node blocks */
1586
1587 /* for segment-related operations */
1588 struct f2fs_sm_info *sm_info; /* segment manager */
1589
1590 /* for bio operations */
1591 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
1592 /* keep migration IO order for LFS mode */
1593 struct f2fs_rwsem io_order_lock;
1594 mempool_t *write_io_dummy; /* Dummy pages */
1595
1596 /* for checkpoint */
1597 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
1598 int cur_cp_pack; /* current checkpoint pack number (1 or 2) */
1599 spinlock_t cp_lock; /* for flag in ckpt */
1600 struct inode *meta_inode; /* cache meta blocks */
1601 struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */
1602 struct f2fs_rwsem cp_rwsem; /* blocking FS operations */
1603 struct f2fs_rwsem node_write; /* locking node writes */
1604 struct f2fs_rwsem node_change; /* locking node change */
1605 wait_queue_head_t cp_wait;
1606 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
1607 long interval_time[MAX_TIME]; /* to store thresholds */
1608 struct ckpt_req_control cprc_info; /* for checkpoint request control */
1609
1610 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
1611
1612 spinlock_t fsync_node_lock; /* for node entry lock */
1613 struct list_head fsync_node_list; /* node list head */
1614 unsigned int fsync_seg_id; /* sequence id */
1615 unsigned int fsync_node_num; /* number of node entries */
1616
1617 /* for orphan inode, use 0'th array */
1618 unsigned int max_orphans; /* max orphan inodes */
1619
1620 /* for inode management */
1621 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
1622 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
1623 struct mutex flush_lock; /* for flush exclusion */
1624
1625 /* for extent tree cache */
1626 struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
1627 atomic64_t allocated_data_blocks; /* for block age extent_cache */
1628
1629 /* The thresholds used for hot and warm data separation */
1630 unsigned int hot_data_age_threshold;
1631 unsigned int warm_data_age_threshold;
1632 unsigned int last_age_weight;
1633
1634 /* basic filesystem units */
1635 unsigned int log_sectors_per_block; /* log2 sectors per block */
1636 unsigned int log_blocksize; /* log2 block size */
1637 unsigned int blocksize; /* block size */
1638 unsigned int root_ino_num; /* root inode number*/
1639 unsigned int node_ino_num; /* node inode number*/
1640 unsigned int meta_ino_num; /* meta inode number*/
1641 unsigned int log_blocks_per_seg; /* log2 blocks per segment */
1642 unsigned int blocks_per_seg; /* blocks per segment */
1643 unsigned int segs_per_sec; /* segments per section */
1644 unsigned int secs_per_zone; /* sections per zone */
1645 unsigned int total_sections; /* total section count */
1646 unsigned int total_node_count; /* total node block count */
1647 unsigned int total_valid_node_count; /* valid node block count */
1648 int dir_level; /* directory level */
1649 int readdir_ra; /* readahead inode in readdir */
1650 u64 max_io_bytes; /* max io bytes to merge IOs */
1651
1652 block_t user_block_count; /* # of user blocks */
1653 block_t total_valid_block_count; /* # of valid blocks */
1654 block_t discard_blks; /* discard command candidates */
1655 block_t last_valid_block_count; /* for recovery */
1656 block_t reserved_blocks; /* configurable reserved blocks */
1657 block_t current_reserved_blocks; /* current reserved blocks */
1658
1659 /* Additional tracking for no checkpoint mode */
1660 block_t unusable_block_count; /* # of blocks saved by last cp */
1661
1662 unsigned int nquota_files; /* # of quota sysfile */
1663 struct f2fs_rwsem quota_sem; /* blocking cp for flags */
1664
1665 /* # of pages, see count_type */
1666 atomic_t nr_pages[NR_COUNT_TYPE];
1667 /* # of allocated blocks */
1668 struct percpu_counter alloc_valid_block_count;
1669
1670 /* writeback control */
1671 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */
1672
1673 /* valid inode count */
1674 struct percpu_counter total_valid_inode_count;
1675
1676 struct f2fs_mount_info mount_opt; /* mount options */
1677
1678 /* for cleaning operations */
1679 struct f2fs_rwsem gc_lock; /*
1680 * semaphore for GC; avoids races
1681 * between concurrent GCs and with CP
1682 */
1683 struct f2fs_gc_kthread *gc_thread; /* GC thread */
1684 struct atgc_management am; /* atgc management */
1685 unsigned int cur_victim_sec; /* current victim section num */
1686 unsigned int gc_mode; /* current GC state */
1687 unsigned int next_victim_seg[2]; /* next segment in victim section */
1688
1689 /* for skip statistic */
1690 unsigned int atomic_files; /* # of opened atomic file */
1691 unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
1692 unsigned long long skipped_gc_rwsem; /* FG_GC only */
1693
1694 /* threshold for gc trials on pinned files */
1695 u64 gc_pin_file_threshold;
1696 struct f2fs_rwsem pin_sem;
1697
1698 /* maximum # of trials to find a victim segment for SSR and GC */
1699 unsigned int max_victim_search;
1700 /* migration granularity of garbage collection, unit: segment */
1701 unsigned int migration_granularity;
1702
1703 atomic_t no_cp_fsync_pages;
1704
1705 /*
1706 * for stat information.
1707 * one is for the LFS mode, and the other is for the SSR mode.
1708 */
1709 #ifdef CONFIG_F2FS_STAT_FS
1710 struct f2fs_stat_info *stat_info; /* FS status information */
1711 atomic_t meta_count[META_MAX]; /* # of meta blocks */
1712 unsigned int segment_count[2]; /* # of allocated segments */
1713 unsigned int block_count[2]; /* # of allocated blocks */
1714 atomic_t inplace_count; /* # of inplace update */
1715 /* # of lookup extent cache */
1716 atomic64_t total_hit_ext[NR_EXTENT_CACHES];
1717 /* # of hit rbtree extent node */
1718 atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
1719 /* # of hit cached extent node */
1720 atomic64_t read_hit_cached[NR_EXTENT_CACHES];
1721 /* # of hit largest extent node in read extent cache */
1722 atomic64_t read_hit_largest;
1723 atomic_t inline_xattr; /* # of inline_xattr inodes */
1724 atomic_t inline_inode; /* # of inline_data inodes */
1725 atomic_t inline_dir; /* # of inline_dentry inodes */
1726 atomic_t compr_inode; /* # of compressed inodes */
1727 atomic64_t compr_blocks; /* # of compressed blocks */
1728 atomic_t vw_cnt; /* # of volatile writes */
1729 atomic_t max_aw_cnt; /* max # of atomic writes */
1730 atomic_t max_vw_cnt; /* max # of volatile writes */
1731 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
1732 unsigned int other_skip_bggc; /* skip background gc for other reasons */
1733 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
1734 #endif
1735 spinlock_t stat_lock; /* lock for stat operations */
1736
1737 /* For app/fs IO statistics */
1738 spinlock_t iostat_lock;
1739 unsigned long long rw_iostat[NR_IO_TYPE];
1740 unsigned long long prev_rw_iostat[NR_IO_TYPE];
1741 bool iostat_enable;
1742 unsigned long iostat_next_period;
1743 unsigned int iostat_period_ms;
1744
1745 /* to attach REQ_META|REQ_FUA flags */
1746 unsigned int data_io_flag;
1747 unsigned int node_io_flag;
1748
1749 /* For sysfs support */
1750 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
1751 struct completion s_kobj_unregister;
1752
1753 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
1754 struct completion s_stat_kobj_unregister;
1755
1756 struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */
1757 struct completion s_feature_list_kobj_unregister;
1758
1759 /* For shrinker support */
1760 struct list_head s_list;
1761 int s_ndevs; /* number of devices */
1762 struct f2fs_dev_info *devs; /* for device list */
1763 unsigned int dirty_device; /* for checkpoint data flush */
1764 spinlock_t dev_lock; /* protect dirty_device */
1765 struct mutex umount_mutex;
1766 unsigned int shrinker_run_no;
1767
1768 /* For write statistics */
1769 u64 sectors_written_start;
1770 u64 kbytes_written;
1771
1772 /* Reference to checksum algorithm driver via cryptoapi */
1773 struct crypto_shash *s_chksum_driver;
1774
1775 /* Precomputed FS UUID checksum for seeding other checksums */
1776 __u32 s_chksum_seed;
1777
1778 struct workqueue_struct *post_read_wq; /* post read workqueue */
1779
1780 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
1781 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
1782
1783 /* For reclaimed segs statistics per each GC mode */
1784 unsigned int gc_segment_mode; /* GC state for reclaimed segments */
1785 unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */
1786
1787 #ifdef CONFIG_F2FS_FS_COMPRESSION
1788 struct kmem_cache *page_array_slab; /* page array entry */
1789 unsigned int page_array_slab_size; /* default page array slab size */
1790
1791 /* For runtime compression statistics */
1792 u64 compr_written_block;
1793 u64 compr_saved_block;
1794 u32 compr_new_inode;
1795
1796 /* For compressed block cache */
1797 struct inode *compress_inode; /* cache compressed blocks */
1798 unsigned int compress_percent; /* cache page percentage */
1799 unsigned int compress_watermark; /* cache page watermark */
1800 atomic_t compress_page_hit; /* cache hit count */
1801 #endif
1802 };
1803
1804 struct f2fs_private_dio {
1805 struct inode *inode;
1806 void *orig_private;
1807 bio_end_io_t *orig_end_io;
1808 bool write;
1809 };
1810
1811 #ifdef CONFIG_F2FS_FAULT_INJECTION
1812 #define f2fs_show_injection_info(sbi, type) \
1813 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \
1814 KERN_INFO, sbi->sb->s_id, \
1815 f2fs_fault_name[type], \
1816 __func__, __builtin_return_address(0))
1817 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1818 {
1819 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1820
1821 if (!ffi->inject_rate)
1822 return false;
1823
1824 if (!IS_FAULT_SET(ffi, type))
1825 return false;
1826
1827 atomic_inc(&ffi->inject_ops);
1828 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1829 atomic_set(&ffi->inject_ops, 0);
1830 return true;
1831 }
1832 return false;
1833 }
1834 #else
1835 #define f2fs_show_injection_info(sbi, type) do { } while (0)
1836 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1837 {
1838 return false;
1839 }
1840 #endif
1841
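/*
 * Typical use of the fault-injection hooks above; the same pattern appears
 * in the allocation helpers later in this header:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 */
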
1842 /*
1843 * Test if the mounted volume is a multi-device volume.
1844 * - For a single regular disk volume, sbi->s_ndevs is 0.
1845 * - For a single zoned disk volume, sbi->s_ndevs is 1.
1846 * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1847 */
1848 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1849 {
1850 return sbi->s_ndevs > 1;
1851 }
1852
1853 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1854 {
1855 unsigned long now = jiffies;
1856
1857 sbi->last_time[type] = now;
1858
1859 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1860 if (type == REQ_TIME) {
1861 sbi->last_time[DISCARD_TIME] = now;
1862 sbi->last_time[GC_TIME] = now;
1863 }
1864 }
1865
1866 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1867 {
1868 unsigned long interval = sbi->interval_time[type] * HZ;
1869
1870 return time_after(jiffies, sbi->last_time[type] + interval);
1871 }
1872
1873 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1874 int type)
1875 {
1876 unsigned long interval = sbi->interval_time[type] * HZ;
1877 unsigned int wait_ms = 0;
1878 long delta;
1879
1880 delta = (sbi->last_time[type] + interval) - jiffies;
1881 if (delta > 0)
1882 wait_ms = jiffies_to_msecs(delta);
1883
1884 return wait_ms;
1885 }
1886
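/*
 * Sketch of how a background worker might combine the timing helpers above
 * (hypothetical loop; the real policies live in the GC/discard threads):
 *
 *	if (!f2fs_time_over(sbi, REQ_TIME))
 *		msleep(f2fs_time_to_wait(sbi, REQ_TIME));
 *	do background work, then:
 *	f2fs_update_time(sbi, REQ_TIME);
 */
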
1887 /*
1888 * Inline functions
1889 */
1890 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1891 const void *address, unsigned int length)
1892 {
1893 struct {
1894 struct shash_desc shash;
1895 char ctx[4];
1896 } desc;
1897 int err;
1898
1899 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1900
1901 desc.shash.tfm = sbi->s_chksum_driver;
1902 *(u32 *)desc.ctx = crc;
1903
1904 err = crypto_shash_update(&desc.shash, address, length);
1905 BUG_ON(err);
1906
1907 return *(u32 *)desc.ctx;
1908 }
1909
1910 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1911 unsigned int length)
1912 {
1913 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1914 }
1915
1916 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1917 void *buf, size_t buf_size)
1918 {
1919 return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1920 }
1921
1922 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1923 const void *address, unsigned int length)
1924 {
1925 return __f2fs_crc32(sbi, crc, address, length);
1926 }
1927
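/*
 * Illustrative use of the helpers above (hypothetical snippet): metadata
 * checksums are chained from the precomputed UUID seed, e.g.:
 *
 *	u32 crc = f2fs_chksum(sbi, sbi->s_chksum_seed, &ino, sizeof(ino));
 *	crc = f2fs_chksum(sbi, crc, data, len);
 */
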
1928 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1929 {
1930 return container_of(inode, struct f2fs_inode_info, vfs_inode);
1931 }
1932
1933 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1934 {
1935 return sb->s_fs_info;
1936 }
1937
1938 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1939 {
1940 return F2FS_SB(inode->i_sb);
1941 }
1942
1943 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1944 {
1945 return F2FS_I_SB(mapping->host);
1946 }
1947
1948 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1949 {
1950 return F2FS_M_SB(page_file_mapping(page));
1951 }
1952
1953 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1954 {
1955 return (struct f2fs_super_block *)(sbi->raw_super);
1956 }
1957
1958 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1959 {
1960 return (struct f2fs_checkpoint *)(sbi->ckpt);
1961 }
1962
1963 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1964 {
1965 return (struct f2fs_node *)page_address(page);
1966 }
1967
1968 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1969 {
1970 return &((struct f2fs_node *)page_address(page))->i;
1971 }
1972
1973 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1974 {
1975 return (struct f2fs_nm_info *)(sbi->nm_info);
1976 }
1977
1978 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1979 {
1980 return (struct f2fs_sm_info *)(sbi->sm_info);
1981 }
1982
1983 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1984 {
1985 return (struct sit_info *)(SM_I(sbi)->sit_info);
1986 }
1987
1988 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1989 {
1990 return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1991 }
1992
1993 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1994 {
1995 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1996 }
1997
1998 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
1999 {
2000 return sbi->meta_inode->i_mapping;
2001 }
2002
2003 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
2004 {
2005 return sbi->node_inode->i_mapping;
2006 }
2007
2008 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
2009 {
2010 return test_bit(type, &sbi->s_flag);
2011 }
2012
2013 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2014 {
2015 set_bit(type, &sbi->s_flag);
2016 }
2017
2018 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2019 {
2020 clear_bit(type, &sbi->s_flag);
2021 }
2022
2023 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2024 {
2025 return le64_to_cpu(cp->checkpoint_ver);
2026 }
2027
2028 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2029 {
2030 if (type < F2FS_MAX_QUOTAS)
2031 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2032 return 0;
2033 }
2034
2035 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2036 {
2037 size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2038 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2039 }
2040
2041 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2042 {
2043 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2044
2045 return ckpt_flags & f;
2046 }
2047
2048 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2049 {
2050 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2051 }
2052
2053 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2054 {
2055 unsigned int ckpt_flags;
2056
2057 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2058 ckpt_flags |= f;
2059 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2060 }
2061
2062 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2063 {
2064 unsigned long flags;
2065
2066 spin_lock_irqsave(&sbi->cp_lock, flags);
2067 __set_ckpt_flags(F2FS_CKPT(sbi), f);
2068 spin_unlock_irqrestore(&sbi->cp_lock, flags);
2069 }
2070
2071 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2072 {
2073 unsigned int ckpt_flags;
2074
2075 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2076 ckpt_flags &= (~f);
2077 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2078 }
2079
2080 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2081 {
2082 unsigned long flags;
2083
2084 spin_lock_irqsave(&sbi->cp_lock, flags);
2085 __clear_ckpt_flags(F2FS_CKPT(sbi), f);
2086 spin_unlock_irqrestore(&sbi->cp_lock, flags);
2087 }
2088
2089 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
2090 {
2091 unsigned long flags;
2092 unsigned char *nat_bits;
2093
2094 /*
2095 * In order to re-enable nat_bits we need to call fsck.f2fs by
2096 * set_sbi_flag(sbi, SBI_NEED_FSCK). But that may be very costly, so
2097 * let's rely on a regular fsck or an unclean shutdown instead.
2098 */
2099
2100 if (lock)
2101 spin_lock_irqsave(&sbi->cp_lock, flags);
2102 __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
2103 nat_bits = NM_I(sbi)->nat_bits;
2104 NM_I(sbi)->nat_bits = NULL;
2105 if (lock)
2106 spin_unlock_irqrestore(&sbi->cp_lock, flags);
2107
2108 kvfree(nat_bits);
2109 }
2110
2111 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
2112 struct cp_control *cpc)
2113 {
2114 bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
2115
2116 return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
2117 }
2118
2119 #define init_f2fs_rwsem(sem) \
2120 do { \
2121 static struct lock_class_key __key; \
2122 \
2123 __init_f2fs_rwsem((sem), #sem, &__key); \
2124 } while (0)
2125
2126 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
2127 const char *sem_name, struct lock_class_key *key)
2128 {
2129 __init_rwsem(&sem->internal_rwsem, sem_name, key);
2130 init_waitqueue_head(&sem->read_waiters);
2131 }
2132
2133 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
2134 {
2135 return rwsem_is_locked(&sem->internal_rwsem);
2136 }
2137
2138 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
2139 {
2140 return rwsem_is_contended(&sem->internal_rwsem);
2141 }
2142
2143 static inline void f2fs_down_read(struct f2fs_rwsem *sem)
2144 {
2145 wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
2146 }
2147
2148 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
2149 {
2150 return down_read_trylock(&sem->internal_rwsem);
2151 }
2152
2153 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2154 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
2155 {
2156 down_read_nested(&sem->internal_rwsem, subclass);
2157 }
2158 #else
2159 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
2160 #endif
2161
2162 static inline void f2fs_up_read(struct f2fs_rwsem *sem)
2163 {
2164 up_read(&sem->internal_rwsem);
2165 }
2166
2167 static inline void f2fs_down_write(struct f2fs_rwsem *sem)
2168 {
2169 down_write(&sem->internal_rwsem);
2170 }
2171
2172 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
2173 {
2174 return down_write_trylock(&sem->internal_rwsem);
2175 }
2176
2177 static inline void f2fs_up_write(struct f2fs_rwsem *sem)
2178 {
2179 up_write(&sem->internal_rwsem);
2180 wake_up_all(&sem->read_waiters);
2181 }
2182
2183 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2184 {
2185 f2fs_down_read(&sbi->cp_rwsem);
2186 }
2187
2188 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2189 {
2190 return f2fs_down_read_trylock(&sbi->cp_rwsem);
2191 }
2192
2193 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2194 {
2195 f2fs_up_read(&sbi->cp_rwsem);
2196 }
2197
2198 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2199 {
2200 f2fs_down_write(&sbi->cp_rwsem);
2201 }
2202
2203 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2204 {
2205 f2fs_up_write(&sbi->cp_rwsem);
2206 }
2207
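/*
 * Sketch of the usual pairing (hypothetical caller): block-allocating
 * operations take cp_rwsem for read via f2fs_lock_op() so that a
 * checkpoint, which takes it for write via f2fs_lock_all(), cannot run
 * in the middle of them:
 *
 *	f2fs_lock_op(sbi);
 *	err = f2fs_reserve_new_block(&dn);
 *	f2fs_unlock_op(sbi);
 */
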
2208 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2209 {
2210 int reason = CP_SYNC;
2211
2212 if (test_opt(sbi, FASTBOOT))
2213 reason = CP_FASTBOOT;
2214 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2215 reason = CP_UMOUNT;
2216 return reason;
2217 }
2218
2219 static inline bool __remain_node_summaries(int reason)
2220 {
2221 return (reason & (CP_UMOUNT | CP_FASTBOOT));
2222 }
2223
2224 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2225 {
2226 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2227 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2228 }
2229
2230 /*
2231 * Check whether the inode has blocks or not
2232 */
2233 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2234 {
2235 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2236
2237 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2238 }
2239
2240 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2241 {
2242 return ofs == XATTR_NODE_OFFSET;
2243 }
2244
2245 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2246 struct inode *inode, bool cap)
2247 {
2248 if (!inode)
2249 return true;
2250 if (!test_opt(sbi, RESERVE_ROOT))
2251 return false;
2252 if (IS_NOQUOTA(inode))
2253 return true;
2254 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2255 return true;
2256 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2257 in_group_p(F2FS_OPTION(sbi).s_resgid))
2258 return true;
2259 if (cap && capable(CAP_SYS_RESOURCE))
2260 return true;
2261 return false;
2262 }
2263
2264 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2265 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2266 struct inode *inode, blkcnt_t *count)
2267 {
2268 blkcnt_t diff = 0, release = 0;
2269 block_t avail_user_block_count;
2270 int ret;
2271
2272 ret = dquot_reserve_block(inode, *count);
2273 if (ret)
2274 return ret;
2275
2276 if (time_to_inject(sbi, FAULT_BLOCK)) {
2277 f2fs_show_injection_info(sbi, FAULT_BLOCK);
2278 release = *count;
2279 goto release_quota;
2280 }
2281
2282 /*
2283 * let's increase this before the actual block count change so that
2284 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2285 */
2286 percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2287
2288 spin_lock(&sbi->stat_lock);
2289 sbi->total_valid_block_count += (block_t)(*count);
2290 avail_user_block_count = sbi->user_block_count -
2291 sbi->current_reserved_blocks;
2292
2293 if (!__allow_reserved_blocks(sbi, inode, true))
2294 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2295
2296 if (F2FS_IO_ALIGNED(sbi))
2297 avail_user_block_count -= sbi->blocks_per_seg *
2298 SM_I(sbi)->additional_reserved_segments;
2299
2300 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2301 if (avail_user_block_count > sbi->unusable_block_count)
2302 avail_user_block_count -= sbi->unusable_block_count;
2303 else
2304 avail_user_block_count = 0;
2305 }
2306 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2307 diff = sbi->total_valid_block_count - avail_user_block_count;
2308 if (diff > *count)
2309 diff = *count;
2310 *count -= diff;
2311 release = diff;
2312 sbi->total_valid_block_count -= diff;
2313 if (!*count) {
2314 spin_unlock(&sbi->stat_lock);
2315 goto enospc;
2316 }
2317 }
2318 spin_unlock(&sbi->stat_lock);
2319
2320 if (unlikely(release)) {
2321 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2322 dquot_release_reservation_block(inode, release);
2323 }
2324 f2fs_i_blocks_write(inode, *count, true, true);
2325 return 0;
2326
2327 enospc:
2328 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2329 release_quota:
2330 dquot_release_reservation_block(inode, release);
2331 return -ENOSPC;
2332 }
2333
2334 __printf(2, 3)
2335 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2336
2337 #define f2fs_err(sbi, fmt, ...) \
2338 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2339 #define f2fs_warn(sbi, fmt, ...) \
2340 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2341 #define f2fs_notice(sbi, fmt, ...) \
2342 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2343 #define f2fs_info(sbi, fmt, ...) \
2344 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2345 #define f2fs_debug(sbi, fmt, ...) \
2346 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
2347
2348 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2349 struct inode *inode,
2350 block_t count)
2351 {
2352 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2353
2354 spin_lock(&sbi->stat_lock);
2355 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2356 sbi->total_valid_block_count -= (block_t)count;
2357 if (sbi->reserved_blocks &&
2358 sbi->current_reserved_blocks < sbi->reserved_blocks)
2359 sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2360 sbi->current_reserved_blocks + count);
2361 spin_unlock(&sbi->stat_lock);
2362 if (unlikely(inode->i_blocks < sectors)) {
2363 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2364 inode->i_ino,
2365 (unsigned long long)inode->i_blocks,
2366 (unsigned long long)sectors);
2367 set_sbi_flag(sbi, SBI_NEED_FSCK);
2368 return;
2369 }
2370 f2fs_i_blocks_write(inode, count, false, true);
2371 }
2372
2373 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2374 {
2375 atomic_inc(&sbi->nr_pages[count_type]);
2376
2377 if (count_type == F2FS_DIRTY_DENTS ||
2378 count_type == F2FS_DIRTY_NODES ||
2379 count_type == F2FS_DIRTY_META ||
2380 count_type == F2FS_DIRTY_QDATA ||
2381 count_type == F2FS_DIRTY_IMETA)
2382 set_sbi_flag(sbi, SBI_IS_DIRTY);
2383 }
2384
2385 static inline void inode_inc_dirty_pages(struct inode *inode)
2386 {
2387 atomic_inc(&F2FS_I(inode)->dirty_pages);
2388 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2389 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2390 if (IS_NOQUOTA(inode))
2391 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2392 }
2393
2394 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2395 {
2396 atomic_dec(&sbi->nr_pages[count_type]);
2397 }
2398
2399 static inline void inode_dec_dirty_pages(struct inode *inode)
2400 {
2401 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2402 !S_ISLNK(inode->i_mode))
2403 return;
2404
2405 atomic_dec(&F2FS_I(inode)->dirty_pages);
2406 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2407 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2408 if (IS_NOQUOTA(inode))
2409 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2410 }
2411
2412 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2413 {
2414 return atomic_read(&sbi->nr_pages[count_type]);
2415 }
2416
2417 static inline int get_dirty_pages(struct inode *inode)
2418 {
2419 return atomic_read(&F2FS_I(inode)->dirty_pages);
2420 }
2421
2422 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2423 {
2424 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2425 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2426 sbi->log_blocks_per_seg;
2427
2428 return segs / sbi->segs_per_sec;
2429 }
2430
2431 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2432 {
2433 return sbi->total_valid_block_count;
2434 }
2435
2436 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2437 {
2438 return sbi->discard_blks;
2439 }
2440
2441 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2442 {
2443 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2444
2445 /* return NAT or SIT bitmap */
2446 if (flag == NAT_BITMAP)
2447 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2448 else if (flag == SIT_BITMAP)
2449 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2450
2451 return 0;
2452 }
2453
2454 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2455 {
2456 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2457 }
2458
2459 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2460 {
2461 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2462 void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2463 int offset;
2464
2465 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2466 offset = (flag == SIT_BITMAP) ?
2467 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2468 /*
2469 * if large_nat_bitmap feature is enabled, leave checksum
2470 * protection for all nat/sit bitmaps.
2471 */
2472 return tmp_ptr + offset + sizeof(__le32);
2473 }
2474
2475 if (__cp_payload(sbi) > 0) {
2476 if (flag == NAT_BITMAP)
2477 return &ckpt->sit_nat_version_bitmap;
2478 else
2479 return (unsigned char *)ckpt + F2FS_BLKSIZE;
2480 } else {
2481 offset = (flag == NAT_BITMAP) ?
2482 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2483 return tmp_ptr + offset;
2484 }
2485 }
2486
2487 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2488 {
2489 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2490
2491 if (sbi->cur_cp_pack == 2)
2492 start_addr += sbi->blocks_per_seg;
2493 return start_addr;
2494 }
2495
2496 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2497 {
2498 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2499
2500 if (sbi->cur_cp_pack == 1)
2501 start_addr += sbi->blocks_per_seg;
2502 return start_addr;
2503 }
2504
2505 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2506 {
2507 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2508 }
2509
2510 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2511 {
2512 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2513 }
2514
2515 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
2516 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2517 struct inode *inode, bool is_inode)
2518 {
2519 block_t valid_block_count;
2520 unsigned int valid_node_count, user_block_count;
2521 int err;
2522
2523 if (is_inode) {
2524 if (inode) {
2525 err = dquot_alloc_inode(inode);
2526 if (err)
2527 return err;
2528 }
2529 } else {
2530 err = dquot_reserve_block(inode, 1);
2531 if (err)
2532 return err;
2533 }
2534
2535 if (time_to_inject(sbi, FAULT_BLOCK)) {
2536 f2fs_show_injection_info(sbi, FAULT_BLOCK);
2537 goto enospc;
2538 }
2539
2540 spin_lock(&sbi->stat_lock);
2541
2542 valid_block_count = sbi->total_valid_block_count +
2543 sbi->current_reserved_blocks + 1;
2544
2545 if (!__allow_reserved_blocks(sbi, inode, false))
2546 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2547
2548 if (F2FS_IO_ALIGNED(sbi))
2549 valid_block_count += sbi->blocks_per_seg *
2550 SM_I(sbi)->additional_reserved_segments;
2551
2552 user_block_count = sbi->user_block_count;
2553 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2554 user_block_count -= sbi->unusable_block_count;
2555
2556 if (unlikely(valid_block_count > user_block_count)) {
2557 spin_unlock(&sbi->stat_lock);
2558 goto enospc;
2559 }
2560
2561 valid_node_count = sbi->total_valid_node_count + 1;
2562 if (unlikely(valid_node_count > sbi->total_node_count)) {
2563 spin_unlock(&sbi->stat_lock);
2564 goto enospc;
2565 }
2566
2567 sbi->total_valid_node_count++;
2568 sbi->total_valid_block_count++;
2569 spin_unlock(&sbi->stat_lock);
2570
2571 if (inode) {
2572 if (is_inode)
2573 f2fs_mark_inode_dirty_sync(inode, true);
2574 else
2575 f2fs_i_blocks_write(inode, 1, true, true);
2576 }
2577
2578 percpu_counter_inc(&sbi->alloc_valid_block_count);
2579 return 0;
2580
2581 enospc:
2582 if (is_inode) {
2583 if (inode)
2584 dquot_free_inode(inode);
2585 } else {
2586 dquot_release_reservation_block(inode, 1);
2587 }
2588 return -ENOSPC;
2589 }
2590
2591 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2592 struct inode *inode, bool is_inode)
2593 {
2594 spin_lock(&sbi->stat_lock);
2595
2596 if (unlikely(!sbi->total_valid_block_count ||
2597 !sbi->total_valid_node_count)) {
2598 f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
2599 sbi->total_valid_block_count,
2600 sbi->total_valid_node_count);
2601 set_sbi_flag(sbi, SBI_NEED_FSCK);
2602 } else {
2603 sbi->total_valid_block_count--;
2604 sbi->total_valid_node_count--;
2605 }
2606
2607 if (sbi->reserved_blocks &&
2608 sbi->current_reserved_blocks < sbi->reserved_blocks)
2609 sbi->current_reserved_blocks++;
2610
2611 spin_unlock(&sbi->stat_lock);
2612
2613 if (is_inode) {
2614 dquot_free_inode(inode);
2615 } else {
2616 if (unlikely(inode->i_blocks == 0)) {
2617 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2618 inode->i_ino,
2619 (unsigned long long)inode->i_blocks);
2620 set_sbi_flag(sbi, SBI_NEED_FSCK);
2621 return;
2622 }
2623 f2fs_i_blocks_write(inode, 1, false, true);
2624 }
2625 }
2626
2627 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2628 {
2629 return sbi->total_valid_node_count;
2630 }
2631
2632 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2633 {
2634 percpu_counter_inc(&sbi->total_valid_inode_count);
2635 }
2636
2637 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2638 {
2639 percpu_counter_dec(&sbi->total_valid_inode_count);
2640 }
2641
2642 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2643 {
2644 return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2645 }
2646
2647 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2648 pgoff_t index, bool for_write)
2649 {
2650 struct page *page;
2651
2652 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2653 if (!for_write)
2654 page = find_get_page_flags(mapping, index,
2655 FGP_LOCK | FGP_ACCESSED);
2656 else
2657 page = find_lock_page(mapping, index);
2658 if (page)
2659 return page;
2660
2661 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
2662 f2fs_show_injection_info(F2FS_M_SB(mapping),
2663 FAULT_PAGE_ALLOC);
2664 return NULL;
2665 }
2666 }
2667
2668 if (!for_write)
2669 return grab_cache_page(mapping, index);
2670 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
2671 }
2672
2673 static inline struct page *f2fs_pagecache_get_page(
2674 struct address_space *mapping, pgoff_t index,
2675 int fgp_flags, gfp_t gfp_mask)
2676 {
2677 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2678 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2679 return NULL;
2680 }
2681
2682 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2683 }
2684
2685 static inline void f2fs_copy_page(struct page *src, struct page *dst)
2686 {
2687 char *src_kaddr = kmap(src);
2688 char *dst_kaddr = kmap(dst);
2689
2690 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
2691 kunmap(dst);
2692 kunmap(src);
2693 }
2694
2695 static inline void f2fs_put_page(struct page *page, int unlock)
2696 {
2697 if (!page)
2698 return;
2699
2700 if (unlock) {
2701 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2702 unlock_page(page);
2703 }
2704 put_page(page);
2705 }
2706
2707 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2708 {
2709 if (dn->node_page)
2710 f2fs_put_page(dn->node_page, 1);
2711 if (dn->inode_page && dn->node_page != dn->inode_page)
2712 f2fs_put_page(dn->inode_page, 0);
2713 dn->node_page = NULL;
2714 dn->inode_page = NULL;
2715 }
2716
2717 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2718 size_t size)
2719 {
2720 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2721 }
2722
2723 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2724 gfp_t flags)
2725 {
2726 void *entry;
2727
2728 entry = kmem_cache_alloc(cachep, flags);
2729 if (!entry)
2730 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2731 return entry;
2732 }
2733
2734 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2735 {
2736 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2737 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2738 get_pages(sbi, F2FS_WB_CP_DATA) ||
2739 get_pages(sbi, F2FS_DIO_READ) ||
2740 get_pages(sbi, F2FS_DIO_WRITE))
2741 return true;
2742
2743 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2744 atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2745 return true;
2746
2747 if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2748 atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2749 return true;
2750 return false;
2751 }
2752
2753 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2754 {
2755 if (sbi->gc_mode == GC_URGENT_HIGH)
2756 return true;
2757
2758 if (is_inflight_io(sbi, type))
2759 return false;
2760
2761 if (sbi->gc_mode == GC_URGENT_LOW &&
2762 (type == DISCARD_TIME || type == GC_TIME))
2763 return true;
2764
2765 return f2fs_time_over(sbi, type);
2766 }
2767
2768 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2769 unsigned long index, void *item)
2770 {
2771 while (radix_tree_insert(root, index, item))
2772 cond_resched();
2773 }
2774
2775 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
2776
2777 static inline bool IS_INODE(struct page *page)
2778 {
2779 struct f2fs_node *p = F2FS_NODE(page);
2780
2781 return RAW_IS_INODE(p);
2782 }
2783
2784 static inline int offset_in_addr(struct f2fs_inode *i)
2785 {
2786 return (i->i_inline & F2FS_EXTRA_ATTR) ?
2787 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2788 }
2789
2790 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2791 {
2792 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2793 }
2794
2795 static inline int f2fs_has_extra_attr(struct inode *inode);
2796 static inline block_t data_blkaddr(struct inode *inode,
2797 struct page *node_page, unsigned int offset)
2798 {
2799 struct f2fs_node *raw_node;
2800 __le32 *addr_array;
2801 int base = 0;
2802 bool is_inode = IS_INODE(node_page);
2803
2804 raw_node = F2FS_NODE(node_page);
2805
2806 if (is_inode) {
2807 if (!inode)
2808 /* from GC path only */
2809 base = offset_in_addr(&raw_node->i);
2810 else if (f2fs_has_extra_attr(inode))
2811 base = get_extra_isize(inode);
2812 }
2813
2814 addr_array = blkaddr_in_node(raw_node);
2815 return le32_to_cpu(addr_array[base + offset]);
2816 }
2817
2818 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2819 {
2820 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2821 }
2822
2823 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2824 {
2825 int mask;
2826
2827 addr += (nr >> 3);
2828 mask = 1 << (7 - (nr & 0x07));
2829 return mask & *addr;
2830 }
2831
2832 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2833 {
2834 int mask;
2835
2836 addr += (nr >> 3);
2837 mask = 1 << (7 - (nr & 0x07));
2838 *addr |= mask;
2839 }
2840
2841 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2842 {
2843 int mask;
2844
2845 addr += (nr >> 3);
2846 mask = 1 << (7 - (nr & 0x07));
2847 *addr &= ~mask;
2848 }
2849
2850 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2851 {
2852 int mask;
2853 int ret;
2854
2855 addr += (nr >> 3);
2856 mask = 1 << (7 - (nr & 0x07));
2857 ret = mask & *addr;
2858 *addr |= mask;
2859 return ret;
2860 }
2861
2862 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2863 {
2864 int mask;
2865 int ret;
2866
2867 addr += (nr >> 3);
2868 mask = 1 << (7 - (nr & 0x07));
2869 ret = mask & *addr;
2870 *addr &= ~mask;
2871 return ret;
2872 }
2873
2874 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2875 {
2876 int mask;
2877
2878 addr += (nr >> 3);
2879 mask = 1 << (7 - (nr & 0x07));
2880 *addr ^= mask;
2881 }
2882
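/*
 * Note on the bit helpers above: bits are numbered most-significant-bit
 * first within each byte, matching the on-disk bitmaps. For example:
 *
 *	char map[2] = { 0 };
 *	f2fs_set_bit(0, map);	results in map[0] == 0x80
 *	f2fs_set_bit(9, map);	results in map[1] == 0x40
 */
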
2883 /*
2884 * On-disk inode flags (f2fs_inode::i_flags)
2885 */
2886 #define F2FS_COMPR_FL 0x00000004 /* Compress file */
2887 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */
2888 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
2889 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */
2890 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */
2891 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */
2892 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */
2893 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
2894 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
2895 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
2896 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
2897
2898 /* Flags that should be inherited by new inodes from their parent. */
2899 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2900 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2901 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2902
2903 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2904 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2905 F2FS_CASEFOLD_FL))
2906
2907 /* Flags that are appropriate for non-directories/regular files. */
2908 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2909
2910 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2911 {
2912 if (S_ISDIR(mode))
2913 return flags;
2914 else if (S_ISREG(mode))
2915 return flags & F2FS_REG_FLMASK;
2916 else
2917 return flags & F2FS_OTHER_FLMASK;
2918 }
2919
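/*
 * Illustrative use of the masks above (hypothetical snippet): on inode
 * creation, flags are inherited from the parent directory and then
 * restricted by file type, roughly:
 *
 *	fi->i_flags = f2fs_mask_flags(mode,
 *			F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 */
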
2920 static inline void __mark_inode_dirty_flag(struct inode *inode,
2921 int flag, bool set)
2922 {
2923 switch (flag) {
2924 case FI_INLINE_XATTR:
2925 case FI_INLINE_DATA:
2926 case FI_INLINE_DENTRY:
2927 case FI_NEW_INODE:
2928 if (set)
2929 return;
2930 fallthrough;
2931 case FI_DATA_EXIST:
2932 case FI_INLINE_DOTS:
2933 case FI_PIN_FILE:
2934 case FI_COMPRESS_RELEASED:
2935 f2fs_mark_inode_dirty_sync(inode, true);
2936 }
2937 }
2938
2939 static inline void set_inode_flag(struct inode *inode, int flag)
2940 {
2941 set_bit(flag, F2FS_I(inode)->flags);
2942 __mark_inode_dirty_flag(inode, flag, true);
2943 }
2944
2945 static inline int is_inode_flag_set(struct inode *inode, int flag)
2946 {
2947 return test_bit(flag, F2FS_I(inode)->flags);
2948 }
2949
2950 static inline void clear_inode_flag(struct inode *inode, int flag)
2951 {
2952 clear_bit(flag, F2FS_I(inode)->flags);
2953 __mark_inode_dirty_flag(inode, flag, false);
2954 }
2955
2956 static inline bool f2fs_verity_in_progress(struct inode *inode)
2957 {
2958 return IS_ENABLED(CONFIG_FS_VERITY) &&
2959 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2960 }
2961
2962 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2963 {
2964 F2FS_I(inode)->i_acl_mode = mode;
2965 set_inode_flag(inode, FI_ACL_MODE);
2966 f2fs_mark_inode_dirty_sync(inode, false);
2967 }
2968
2969 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
2970 {
2971 if (inc)
2972 inc_nlink(inode);
2973 else
2974 drop_nlink(inode);
2975 f2fs_mark_inode_dirty_sync(inode, true);
2976 }
2977
2978 static inline void f2fs_i_blocks_write(struct inode *inode,
2979 block_t diff, bool add, bool claim)
2980 {
2981 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2982 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2983
2984 /* add == 1 && claim == 1 must be paired with a prior dquot_reserve_block() */
2985 if (add) {
2986 if (claim)
2987 dquot_claim_block(inode, diff);
2988 else
2989 dquot_alloc_block_nofail(inode, diff);
2990 } else {
2991 dquot_free_block(inode, diff);
2992 }
2993
2994 f2fs_mark_inode_dirty_sync(inode, true);
2995 if (clean || recover)
2996 set_inode_flag(inode, FI_AUTO_RECOVER);
2997 }
2998
2999 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
3000 {
3001 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3002 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3003
3004 if (i_size_read(inode) == i_size)
3005 return;
3006
3007 i_size_write(inode, i_size);
3008 f2fs_mark_inode_dirty_sync(inode, true);
3009 if (clean || recover)
3010 set_inode_flag(inode, FI_AUTO_RECOVER);
3011 }
3012
3013 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
3014 {
3015 F2FS_I(inode)->i_current_depth = depth;
3016 f2fs_mark_inode_dirty_sync(inode, true);
3017 }
3018
3019 static inline void f2fs_i_gc_failures_write(struct inode *inode,
3020 unsigned int count)
3021 {
3022 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
3023 f2fs_mark_inode_dirty_sync(inode, true);
3024 }
3025
3026 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
3027 {
3028 F2FS_I(inode)->i_xattr_nid = xnid;
3029 f2fs_mark_inode_dirty_sync(inode, true);
3030 }
3031
3032 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
3033 {
3034 F2FS_I(inode)->i_pino = pino;
3035 f2fs_mark_inode_dirty_sync(inode, true);
3036 }
3037
3038 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
3039 {
3040 struct f2fs_inode_info *fi = F2FS_I(inode);
3041
3042 if (ri->i_inline & F2FS_INLINE_XATTR)
3043 set_bit(FI_INLINE_XATTR, fi->flags);
3044 if (ri->i_inline & F2FS_INLINE_DATA)
3045 set_bit(FI_INLINE_DATA, fi->flags);
3046 if (ri->i_inline & F2FS_INLINE_DENTRY)
3047 set_bit(FI_INLINE_DENTRY, fi->flags);
3048 if (ri->i_inline & F2FS_DATA_EXIST)
3049 set_bit(FI_DATA_EXIST, fi->flags);
3050 if (ri->i_inline & F2FS_INLINE_DOTS)
3051 set_bit(FI_INLINE_DOTS, fi->flags);
3052 if (ri->i_inline & F2FS_EXTRA_ATTR)
3053 set_bit(FI_EXTRA_ATTR, fi->flags);
3054 if (ri->i_inline & F2FS_PIN_FILE)
3055 set_bit(FI_PIN_FILE, fi->flags);
3056 if (ri->i_inline & F2FS_COMPRESS_RELEASED)
3057 set_bit(FI_COMPRESS_RELEASED, fi->flags);
3058 }
3059
3060 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
3061 {
3062 ri->i_inline = 0;
3063
3064 if (is_inode_flag_set(inode, FI_INLINE_XATTR))
3065 ri->i_inline |= F2FS_INLINE_XATTR;
3066 if (is_inode_flag_set(inode, FI_INLINE_DATA))
3067 ri->i_inline |= F2FS_INLINE_DATA;
3068 if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
3069 ri->i_inline |= F2FS_INLINE_DENTRY;
3070 if (is_inode_flag_set(inode, FI_DATA_EXIST))
3071 ri->i_inline |= F2FS_DATA_EXIST;
3072 if (is_inode_flag_set(inode, FI_INLINE_DOTS))
3073 ri->i_inline |= F2FS_INLINE_DOTS;
3074 if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
3075 ri->i_inline |= F2FS_EXTRA_ATTR;
3076 if (is_inode_flag_set(inode, FI_PIN_FILE))
3077 ri->i_inline |= F2FS_PIN_FILE;
3078 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
3079 ri->i_inline |= F2FS_COMPRESS_RELEASED;
3080 }
3081
3082 static inline int f2fs_has_extra_attr(struct inode *inode)
3083 {
3084 return is_inode_flag_set(inode, FI_EXTRA_ATTR);
3085 }
3086
3087 static inline int f2fs_has_inline_xattr(struct inode *inode)
3088 {
3089 return is_inode_flag_set(inode, FI_INLINE_XATTR);
3090 }
3091
3092 static inline int f2fs_compressed_file(struct inode *inode)
3093 {
3094 return S_ISREG(inode->i_mode) &&
3095 is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3096 }
3097
3098 static inline bool f2fs_need_compress_data(struct inode *inode)
3099 {
3100 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
3101
3102 if (!f2fs_compressed_file(inode))
3103 return false;
3104
3105 if (compress_mode == COMPR_MODE_FS)
3106 return true;
3107 else if (compress_mode == COMPR_MODE_USER &&
3108 is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3109 return true;
3110
3111 return false;
3112 }
3113
3114 static inline unsigned int addrs_per_inode(struct inode *inode)
3115 {
3116 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
3117 get_inline_xattr_addrs(inode);
3118
3119 if (!f2fs_compressed_file(inode))
3120 return addrs;
3121 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
3122 }
3123
3124 static inline unsigned int addrs_per_block(struct inode *inode)
3125 {
3126 if (!f2fs_compressed_file(inode))
3127 return DEF_ADDRS_PER_BLOCK;
3128 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
3129 }
3130
3131 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
3132 {
3133 struct f2fs_inode *ri = F2FS_INODE(page);
3134
3135 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
3136 get_inline_xattr_addrs(inode)]);
3137 }
3138
3139 static inline int inline_xattr_size(struct inode *inode)
3140 {
3141 if (f2fs_has_inline_xattr(inode))
3142 return get_inline_xattr_addrs(inode) * sizeof(__le32);
3143 return 0;
3144 }
3145
3146 static inline int f2fs_has_inline_data(struct inode *inode)
3147 {
3148 return is_inode_flag_set(inode, FI_INLINE_DATA);
3149 }
3150
3151 static inline int f2fs_exist_data(struct inode *inode)
3152 {
3153 return is_inode_flag_set(inode, FI_DATA_EXIST);
3154 }
3155
3156 static inline int f2fs_has_inline_dots(struct inode *inode)
3157 {
3158 return is_inode_flag_set(inode, FI_INLINE_DOTS);
3159 }
3160
3161 static inline int f2fs_is_mmap_file(struct inode *inode)
3162 {
3163 return is_inode_flag_set(inode, FI_MMAP_FILE);
3164 }
3165
3166 static inline bool f2fs_is_pinned_file(struct inode *inode)
3167 {
3168 return is_inode_flag_set(inode, FI_PIN_FILE);
3169 }
3170
3171 static inline bool f2fs_is_atomic_file(struct inode *inode)
3172 {
3173 return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3174 }
3175
3176 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
3177 {
3178 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
3179 }
3180
3181 static inline bool f2fs_is_volatile_file(struct inode *inode)
3182 {
3183 return is_inode_flag_set(inode, FI_VOLATILE_FILE);
3184 }
3185
3186 static inline bool f2fs_is_first_block_written(struct inode *inode)
3187 {
3188 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
3189 }
3190
3191 static inline bool f2fs_is_drop_cache(struct inode *inode)
3192 {
3193 return is_inode_flag_set(inode, FI_DROP_CACHE);
3194 }
3195
3196 static inline void *inline_data_addr(struct inode *inode, struct page *page)
3197 {
3198 struct f2fs_inode *ri = F2FS_INODE(page);
3199 int extra_size = get_extra_isize(inode);
3200
3201 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
3202 }
3203
3204 static inline int f2fs_has_inline_dentry(struct inode *inode)
3205 {
3206 return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3207 }
3208
3209 static inline int is_file(struct inode *inode, int type)
3210 {
3211 return F2FS_I(inode)->i_advise & type;
3212 }
3213
3214 static inline void set_file(struct inode *inode, int type)
3215 {
3216 F2FS_I(inode)->i_advise |= type;
3217 f2fs_mark_inode_dirty_sync(inode, true);
3218 }
3219
3220 static inline void clear_file(struct inode *inode, int type)
3221 {
3222 F2FS_I(inode)->i_advise &= ~type;
3223 f2fs_mark_inode_dirty_sync(inode, true);
3224 }
3225
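/*
 * Check whether the in-memory atime/ctime/mtime/crtime still match the
 * timestamps recorded at the last on-disk update (i_disk_time[0..3]).
 */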
3226 static inline bool f2fs_is_time_consistent(struct inode *inode)
3227 {
3228 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
3229 return false;
3230 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
3231 return false;
3232 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
3233 return false;
3234 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
3235 &F2FS_I(inode)->i_crtime))
3236 return false;
3237 return true;
3238 }
3239
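/*
 * Decide whether fsync can skip writing the inode block: for datasync it is
 * enough that the inode is not on the dirty-meta list; otherwise the inode
 * must still carry FI_AUTO_RECOVER with a page-aligned, unchanged i_size and
 * consistent timestamps.
 */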
3240 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
3241 {
3242 bool ret;
3243
3244 if (dsync) {
3245 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3246
3247 spin_lock(&sbi->inode_lock[DIRTY_META]);
3248 ret = list_empty(&F2FS_I(inode)->gdirty_list);
3249 spin_unlock(&sbi->inode_lock[DIRTY_META]);
3250 return ret;
3251 }
3252 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3253 file_keep_isize(inode) ||
3254 i_size_read(inode) & ~PAGE_MASK)
3255 return false;
3256
3257 if (!f2fs_is_time_consistent(inode))
3258 return false;
3259
3260 spin_lock(&F2FS_I(inode)->i_size_lock);
3261 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3262 spin_unlock(&F2FS_I(inode)->i_size_lock);
3263
3264 return ret;
3265 }
3266
3267 static inline bool f2fs_readonly(struct super_block *sb)
3268 {
3269 return sb_rdonly(sb);
3270 }
3271
3272 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3273 {
3274 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3275 }
3276
3277 static inline bool is_dot_dotdot(const u8 *name, size_t len)
3278 {
3279 if (len == 1 && name[0] == '.')
3280 return true;
3281
3282 if (len == 2 && name[0] == '.' && name[1] == '.')
3283 return true;
3284
3285 return false;
3286 }
3287
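/*
 * Memory allocation wrappers: identical to kmalloc()/kvmalloc() and their
 * zeroing variants, except that they honour FAULT_KMALLOC/FAULT_KVMALLOC
 * fault injection and return NULL when an injected failure fires.
 */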
3288 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3289 size_t size, gfp_t flags)
3290 {
3291 if (time_to_inject(sbi, FAULT_KMALLOC)) {
3292 f2fs_show_injection_info(sbi, FAULT_KMALLOC);
3293 return NULL;
3294 }
3295
3296 return kmalloc(size, flags);
3297 }
3298
3299 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3300 size_t size, gfp_t flags)
3301 {
3302 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3303 }
3304
3305 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3306 size_t size, gfp_t flags)
3307 {
3308 if (time_to_inject(sbi, FAULT_KVMALLOC)) {
3309 f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
3310 return NULL;
3311 }
3312
3313 return kvmalloc(size, flags);
3314 }
3315
3316 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3317 size_t size, gfp_t flags)
3318 {
3319 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3320 }
3321
3322 static inline int get_extra_isize(struct inode *inode)
3323 {
3324 return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3325 }
3326
3327 static inline int get_inline_xattr_addrs(struct inode *inode)
3328 {
3329 return F2FS_I(inode)->i_inline_xattr_size;
3330 }
3331
3332 #define f2fs_get_inode_mode(i) \
3333 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3334 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3335
3336 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \
3337 (offsetof(struct f2fs_inode, i_extra_end) - \
3338 offsetof(struct f2fs_inode, i_extra_isize)) \
3339
3340 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr))
3341 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \
3342 ((offsetof(typeof(*(f2fs_inode)), field) + \
3343 sizeof((f2fs_inode)->field)) \
3344 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
3345
3346 #define DEFAULT_IOSTAT_PERIOD_MS 3000
3347 #define MIN_IOSTAT_PERIOD_MS 100
3348 /* maximum period of iostat tracing is 1 day */
3349 #define MAX_IOSTAT_PERIOD_MS 8640000
3350
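/* reset both the current and the previously recorded per-type IO counters */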
3351 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
3352 {
3353 int i;
3354
3355 spin_lock(&sbi->iostat_lock);
3356 for (i = 0; i < NR_IO_TYPE; i++) {
3357 sbi->rw_iostat[i] = 0;
3358 sbi->prev_rw_iostat[i] = 0;
3359 }
3360 spin_unlock(&sbi->iostat_lock);
3361 }
3362
3363 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
3364
3365 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
3366 enum iostat_type type, unsigned long long io_bytes)
3367 {
3368 if (!sbi->iostat_enable)
3369 return;
3370 spin_lock(&sbi->iostat_lock);
3371 sbi->rw_iostat[type] += io_bytes;
3372
3373 if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
3374 sbi->rw_iostat[APP_BUFFERED_IO] =
3375 sbi->rw_iostat[APP_WRITE_IO] -
3376 sbi->rw_iostat[APP_DIRECT_IO];
3377
3378 if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
3379 sbi->rw_iostat[APP_BUFFERED_READ_IO] =
3380 sbi->rw_iostat[APP_READ_IO] -
3381 sbi->rw_iostat[APP_DIRECT_READ_IO];
3382 spin_unlock(&sbi->iostat_lock);
3383
3384 f2fs_record_iostat(sbi);
3385 }
3386
3387 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
3388
3389 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3390
3391 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3392 block_t blkaddr, int type);
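/* sanity-check a block address and trigger f2fs_bug_on() when it is invalid */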
3393 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3394 block_t blkaddr, int type)
3395 {
3396 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3397 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3398 blkaddr, type);
3399 f2fs_bug_on(sbi, 1);
3400 }
3401 }
3402
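/*
 * A data block address is considered valid only when it points at a real
 * on-disk location, i.e. it is none of NEW_ADDR, NULL_ADDR or COMPRESS_ADDR.
 */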
3403 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3404 {
3405 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3406 blkaddr == COMPRESS_ADDR)
3407 return false;
3408 return true;
3409 }
3410
3411 /*
3412 * file.c
3413 */
3414 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3415 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
3416 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3417 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3418 int f2fs_truncate(struct inode *inode);
3419 int f2fs_getattr(const struct path *path, struct kstat *stat,
3420 u32 request_mask, unsigned int flags);
3421 int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
3422 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3423 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3424 int f2fs_precache_extents(struct inode *inode);
3425 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3426 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3427 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3428 int f2fs_pin_file_control(struct inode *inode, bool inc);
3429
3430 /*
3431 * inode.c
3432 */
3433 void f2fs_set_inode_flags(struct inode *inode);
3434 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3435 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3436 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3437 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3438 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3439 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3440 void f2fs_update_inode_page(struct inode *inode);
3441 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3442 void f2fs_evict_inode(struct inode *inode);
3443 void f2fs_handle_failed_inode(struct inode *inode);
3444
3445 /*
3446 * namei.c
3447 */
3448 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3449 bool hot, bool set);
3450 struct dentry *f2fs_get_parent(struct dentry *child);
3451
3452 /*
3453 * dir.c
3454 */
3455 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
3456 int f2fs_init_casefolded_name(const struct inode *dir,
3457 struct f2fs_filename *fname);
3458 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3459 int lookup, struct f2fs_filename *fname);
3460 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3461 struct f2fs_filename *fname);
3462 void f2fs_free_filename(struct f2fs_filename *fname);
3463 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3464 const struct f2fs_filename *fname, int *max_slots);
3465 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3466 unsigned int start_pos, struct fscrypt_str *fstr);
3467 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3468 struct f2fs_dentry_ptr *d);
3469 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3470 const struct f2fs_filename *fname, struct page *dpage);
3471 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3472 unsigned int current_depth);
3473 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3474 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3475 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3476 const struct f2fs_filename *fname,
3477 struct page **res_page);
3478 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3479 const struct qstr *child, struct page **res_page);
3480 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3481 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3482 struct page **page);
3483 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3484 struct page *page, struct inode *inode);
3485 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3486 const struct f2fs_filename *fname);
3487 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3488 const struct fscrypt_str *name, f2fs_hash_t name_hash,
3489 unsigned int bit_pos);
3490 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3491 struct inode *inode, nid_t ino, umode_t mode);
3492 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3493 struct inode *inode, nid_t ino, umode_t mode);
3494 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3495 struct inode *inode, nid_t ino, umode_t mode);
3496 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3497 struct inode *dir, struct inode *inode);
3498 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3499 bool f2fs_empty_dir(struct inode *dir);
3500
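/*
 * Add a directory entry for @dentry, refusing no-key names (encrypted
 * directory whose key is absent) before calling into f2fs_do_add_link().
 */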
3501 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3502 {
3503 if (fscrypt_is_nokey_name(dentry))
3504 return -ENOKEY;
3505 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3506 inode, inode->i_ino, inode->i_mode);
3507 }
3508
3509 /*
3510 * super.c
3511 */
3512 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3513 void f2fs_inode_synced(struct inode *inode);
3514 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3515 int f2fs_quota_sync(struct super_block *sb, int type);
3516 loff_t max_file_blocks(struct inode *inode);
3517 void f2fs_quota_off_umount(struct super_block *sb);
3518 void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
3519 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3520 int f2fs_sync_fs(struct super_block *sb, int sync);
3521 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3522
3523 /*
3524 * hash.c
3525 */
3526 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3527
3528 /*
3529 * node.c
3530 */
3531 struct node_info;
3532
3533 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3534 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3535 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3536 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3537 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3538 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3539 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3540 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3541 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3542 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3543 struct node_info *ni, bool checkpoint_context);
3544 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3545 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3546 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3547 int f2fs_truncate_xattr_node(struct inode *inode);
3548 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3549 unsigned int seq_id);
3550 int f2fs_remove_inode_page(struct inode *inode);
3551 struct page *f2fs_new_inode_page(struct inode *inode);
3552 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3553 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3554 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3555 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3556 int f2fs_move_node_page(struct page *node_page, int gc_type);
3557 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3558 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3559 struct writeback_control *wbc, bool atomic,
3560 unsigned int *seq_id);
3561 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3562 struct writeback_control *wbc,
3563 bool do_balance, enum iostat_type io_type);
3564 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3565 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3566 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3567 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3568 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3569 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3570 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3571 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3572 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3573 unsigned int segno, struct f2fs_summary_block *sum);
3574 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3575 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3576 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3577 int __init f2fs_create_node_manager_caches(void);
3578 void f2fs_destroy_node_manager_caches(void);
3579
3580 /*
3581 * segment.c
3582 */
3583 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3584 void f2fs_register_inmem_page(struct inode *inode, struct page *page);
3585 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
3586 void f2fs_drop_inmem_pages(struct inode *inode);
3587 void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
3588 int f2fs_commit_inmem_pages(struct inode *inode);
3589 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3590 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3591 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3592 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3593 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3594 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3595 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3596 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3597 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3598 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3599 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3600 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3601 struct cp_control *cpc);
3602 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3603 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3604 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3605 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3606 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3607 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3608 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3609 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3610 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3611 void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
3612 unsigned int *newseg, bool new_sec, int dir);
3613 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3614 unsigned int start, unsigned int end);
3615 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3616 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3617 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3618 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3619 struct cp_control *cpc);
3620 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3621 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3622 block_t blk_addr);
3623 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3624 enum iostat_type io_type);
3625 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3626 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3627 struct f2fs_io_info *fio);
3628 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3629 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3630 block_t old_blkaddr, block_t new_blkaddr,
3631 bool recover_curseg, bool recover_newaddr,
3632 bool from_gc);
3633 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3634 block_t old_addr, block_t new_addr,
3635 unsigned char version, bool recover_curseg,
3636 bool recover_newaddr);
3637 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3638 block_t old_blkaddr, block_t *new_blkaddr,
3639 struct f2fs_summary *sum, int type,
3640 struct f2fs_io_info *fio);
3641 void f2fs_wait_on_page_writeback(struct page *page,
3642 enum page_type type, bool ordered, bool locked);
3643 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3644 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3645 block_t len);
3646 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3647 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3648 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3649 unsigned int val, int alloc);
3650 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3651 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3652 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3653 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3654 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3655 int __init f2fs_create_segment_manager_caches(void);
3656 void f2fs_destroy_segment_manager_caches(void);
3657 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3658 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3659 enum page_type type, enum temp_type temp);
3660 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
3661 unsigned int segno);
3662 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3663 unsigned int segno);
3664
3665 /*
3666 * checkpoint.c
3667 */
3668 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
3669 unsigned char reason);
3670 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
3671 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3672 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3673 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3674 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3675 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3676 block_t blkaddr, int type);
3677 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3678 int type, bool sync);
3679 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
3680 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3681 long nr_to_write, enum iostat_type io_type);
3682 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3683 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3684 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3685 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3686 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3687 unsigned int devidx, int type);
3688 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3689 unsigned int devidx, int type);
3690 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
3691 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3692 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3693 void f2fs_add_orphan_inode(struct inode *inode);
3694 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3695 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3696 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3697 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
3698 void f2fs_remove_dirty_inode(struct inode *inode);
3699 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
3700 bool from_cp);
3701 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3702 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3703 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3704 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3705 int __init f2fs_create_checkpoint_caches(void);
3706 void f2fs_destroy_checkpoint_caches(void);
3707 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3708 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3709 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3710 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3711
3712 /*
3713 * data.c
3714 */
3715 int __init f2fs_init_bioset(void);
3716 void f2fs_destroy_bioset(void);
3717 int f2fs_init_bio_entry_cache(void);
3718 void f2fs_destroy_bio_entry_cache(void);
3719 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
3720 struct bio *bio, enum page_type type);
3721 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3722 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3723 struct inode *inode, struct page *page,
3724 nid_t ino, enum page_type type);
3725 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3726 struct bio **bio, struct page *page);
3727 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3728 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3729 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3730 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3731 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3732 block_t blk_addr, struct bio *bio);
3733 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3734 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3735 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3736 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3737 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3738 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
3739 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
3740 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3741 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3742 int op_flags, bool for_write);
3743 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
3744 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3745 bool for_write);
3746 struct page *f2fs_get_new_data_page(struct inode *inode,
3747 struct page *ipage, pgoff_t index, bool new_i_size);
3748 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3749 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
3750 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
3751 int create, int flag);
3752 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3753 u64 start, u64 len);
3754 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3755 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3756 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3757 int f2fs_write_single_data_page(struct page *page, int *submitted,
3758 struct bio **bio, sector_t *last_block,
3759 struct writeback_control *wbc,
3760 enum iostat_type io_type,
3761 int compr_blocks, bool allow_balance);
3762 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3763 unsigned int length);
3764 int f2fs_release_page(struct page *page, gfp_t wait);
3765 #ifdef CONFIG_MIGRATION
3766 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
3767 struct page *page, enum migrate_mode mode);
3768 #endif
3769 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3770 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3771 int f2fs_init_post_read_processing(void);
3772 void f2fs_destroy_post_read_processing(void);
3773 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3774 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3775
3776 /*
3777 * gc.c
3778 */
3779 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3780 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3781 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3782 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
3783 unsigned int segno);
3784 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3785 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
3786 int __init f2fs_create_garbage_collection_cache(void);
3787 void f2fs_destroy_garbage_collection_cache(void);
3788
3789 /*
3790 * recovery.c
3791 */
3792 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3793 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3794 int __init f2fs_create_recovery_cache(void);
3795 void f2fs_destroy_recovery_cache(void);
3796
3797 /*
3798 * debug.c
3799 */
3800 #ifdef CONFIG_F2FS_STAT_FS
3801 struct f2fs_stat_info {
3802 struct list_head stat_list;
3803 struct f2fs_sb_info *sbi;
3804 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3805 int main_area_segs, main_area_sections, main_area_zones;
3806 unsigned long long hit_cached[NR_EXTENT_CACHES];
3807 unsigned long long hit_rbtree[NR_EXTENT_CACHES];
3808 unsigned long long total_ext[NR_EXTENT_CACHES];
3809 unsigned long long hit_total[NR_EXTENT_CACHES];
3810 int ext_tree[NR_EXTENT_CACHES];
3811 int zombie_tree[NR_EXTENT_CACHES];
3812 int ext_node[NR_EXTENT_CACHES];
3813 /* to count memory footprint */
3814 unsigned long long ext_mem[NR_EXTENT_CACHES];
3815 /* for read extent cache */
3816 unsigned long long hit_largest;
3817 /* for block age extent cache */
3818 unsigned long long allocated_data_blocks;
3819 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3820 int ndirty_data, ndirty_qdata;
3821 int inmem_pages;
3822 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3823 int nats, dirty_nats, sits, dirty_sits;
3824 int free_nids, avail_nids, alloc_nids;
3825 int total_count, utilization;
3826 int bg_gc, nr_wb_cp_data, nr_wb_data;
3827 int nr_rd_data, nr_rd_node, nr_rd_meta;
3828 int nr_dio_read, nr_dio_write;
3829 unsigned int io_skip_bggc, other_skip_bggc;
3830 int nr_flushing, nr_flushed, flush_list_empty;
3831 int nr_discarding, nr_discarded;
3832 int nr_discard_cmd;
3833 unsigned int undiscard_blks;
3834 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
3835 unsigned int cur_ckpt_time, peak_ckpt_time;
3836 int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3837 int compr_inode;
3838 unsigned long long compr_blocks;
3839 int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
3840 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3841 unsigned int bimodal, avg_vblocks;
3842 int util_free, util_valid, util_invalid;
3843 int rsvd_segs, overp_segs;
3844 int dirty_count, node_pages, meta_pages, compress_pages;
3845 int compress_page_hit;
3846 int prefree_count, call_count, cp_count, bg_cp_count;
3847 int tot_segs, node_segs, data_segs, free_segs, free_secs;
3848 int bg_node_segs, bg_data_segs;
3849 int tot_blks, data_blks, node_blks;
3850 int bg_data_blks, bg_node_blks;
3851 unsigned long long skipped_atomic_files[2];
3852 int curseg[NR_CURSEG_TYPE];
3853 int cursec[NR_CURSEG_TYPE];
3854 int curzone[NR_CURSEG_TYPE];
3855 unsigned int dirty_seg[NR_CURSEG_TYPE];
3856 unsigned int full_seg[NR_CURSEG_TYPE];
3857 unsigned int valid_blks[NR_CURSEG_TYPE];
3858
3859 unsigned int meta_count[META_MAX];
3860 unsigned int segment_count[2];
3861 unsigned int block_count[2];
3862 unsigned int inplace_count;
3863 unsigned long long base_mem, cache_mem, page_mem;
3864 };
3865
3866 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3867 {
3868 return (struct f2fs_stat_info *)sbi->stat_info;
3869 }
3870
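/*
 * Statistics update helpers; every stat_*() macro below becomes a no-op
 * when CONFIG_F2FS_STAT_FS is disabled (see the #else branch further down).
 */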
3871 #define stat_inc_cp_count(si) ((si)->cp_count++)
3872 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
3873 #define stat_inc_call_count(si) ((si)->call_count++)
3874 #define stat_inc_bggc_count(si) ((si)->bg_gc++)
3875 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
3876 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
3877 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
3878 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
3879 #define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type]))
3880 #define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type]))
3881 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
3882 #define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type]))
3883 #define stat_inc_inline_xattr(inode) \
3884 do { \
3885 if (f2fs_has_inline_xattr(inode)) \
3886 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
3887 } while (0)
3888 #define stat_dec_inline_xattr(inode) \
3889 do { \
3890 if (f2fs_has_inline_xattr(inode)) \
3891 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
3892 } while (0)
3893 #define stat_inc_inline_inode(inode) \
3894 do { \
3895 if (f2fs_has_inline_data(inode)) \
3896 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
3897 } while (0)
3898 #define stat_dec_inline_inode(inode) \
3899 do { \
3900 if (f2fs_has_inline_data(inode)) \
3901 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
3902 } while (0)
3903 #define stat_inc_inline_dir(inode) \
3904 do { \
3905 if (f2fs_has_inline_dentry(inode)) \
3906 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
3907 } while (0)
3908 #define stat_dec_inline_dir(inode) \
3909 do { \
3910 if (f2fs_has_inline_dentry(inode)) \
3911 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
3912 } while (0)
3913 #define stat_inc_compr_inode(inode) \
3914 do { \
3915 if (f2fs_compressed_file(inode)) \
3916 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
3917 } while (0)
3918 #define stat_dec_compr_inode(inode) \
3919 do { \
3920 if (f2fs_compressed_file(inode)) \
3921 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
3922 } while (0)
3923 #define stat_add_compr_blocks(inode, blocks) \
3924 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3925 #define stat_sub_compr_blocks(inode, blocks) \
3926 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
3927 #define stat_inc_meta_count(sbi, blkaddr) \
3928 do { \
3929 if (blkaddr < SIT_I(sbi)->sit_base_addr) \
3930 atomic_inc(&(sbi)->meta_count[META_CP]); \
3931 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
3932 atomic_inc(&(sbi)->meta_count[META_SIT]); \
3933 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
3934 atomic_inc(&(sbi)->meta_count[META_NAT]); \
3935 else if (blkaddr < SM_I(sbi)->main_blkaddr) \
3936 atomic_inc(&(sbi)->meta_count[META_SSA]); \
3937 } while (0)
3938 #define stat_inc_seg_type(sbi, curseg) \
3939 ((sbi)->segment_count[(curseg)->alloc_type]++)
3940 #define stat_inc_block_count(sbi, curseg) \
3941 ((sbi)->block_count[(curseg)->alloc_type]++)
3942 #define stat_inc_inplace_blocks(sbi) \
3943 (atomic_inc(&(sbi)->inplace_count))
3944 #define stat_update_max_atomic_write(inode) \
3945 do { \
3946 int cur = F2FS_I_SB(inode)->atomic_files; \
3947 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
3948 if (cur > max) \
3949 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
3950 } while (0)
3951 #define stat_inc_volatile_write(inode) \
3952 (atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
3953 #define stat_dec_volatile_write(inode) \
3954 (atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
3955 #define stat_update_max_volatile_write(inode) \
3956 do { \
3957 int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \
3958 int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \
3959 if (cur > max) \
3960 atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
3961 } while (0)
3962 #define stat_inc_seg_count(sbi, type, gc_type) \
3963 do { \
3964 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
3965 si->tot_segs++; \
3966 if ((type) == SUM_TYPE_DATA) { \
3967 si->data_segs++; \
3968 si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
3969 } else { \
3970 si->node_segs++; \
3971 si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
3972 } \
3973 } while (0)
3974
3975 #define stat_inc_tot_blk_count(si, blks) \
3976 ((si)->tot_blks += (blks))
3977
3978 #define stat_inc_data_blk_count(sbi, blks, gc_type) \
3979 do { \
3980 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
3981 stat_inc_tot_blk_count(si, blks); \
3982 si->data_blks += (blks); \
3983 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
3984 } while (0)
3985
3986 #define stat_inc_node_blk_count(sbi, blks, gc_type) \
3987 do { \
3988 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
3989 stat_inc_tot_blk_count(si, blks); \
3990 si->node_blks += (blks); \
3991 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
3992 } while (0)
3993
3994 int f2fs_build_stats(struct f2fs_sb_info *sbi);
3995 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
3996 void __init f2fs_create_root_stats(void);
3997 void f2fs_destroy_root_stats(void);
3998 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
3999 #else
4000 #define stat_inc_cp_count(si) do { } while (0)
4001 #define stat_inc_bg_cp_count(si) do { } while (0)
4002 #define stat_inc_call_count(si) do { } while (0)
4003 #define stat_inc_bggc_count(si) do { } while (0)
4004 #define stat_io_skip_bggc_count(sbi) do { } while (0)
4005 #define stat_other_skip_bggc_count(sbi) do { } while (0)
4006 #define stat_inc_dirty_inode(sbi, type) do { } while (0)
4007 #define stat_dec_dirty_inode(sbi, type) do { } while (0)
4008 #define stat_inc_total_hit(sbi, type) do { } while (0)
4009 #define stat_inc_rbtree_node_hit(sbi, type) do { } while (0)
4010 #define stat_inc_largest_node_hit(sbi) do { } while (0)
4011 #define stat_inc_cached_node_hit(sbi, type) do { } while (0)
4012 #define stat_inc_inline_xattr(inode) do { } while (0)
4013 #define stat_dec_inline_xattr(inode) do { } while (0)
4014 #define stat_inc_inline_inode(inode) do { } while (0)
4015 #define stat_dec_inline_inode(inode) do { } while (0)
4016 #define stat_inc_inline_dir(inode) do { } while (0)
4017 #define stat_dec_inline_dir(inode) do { } while (0)
4018 #define stat_inc_compr_inode(inode) do { } while (0)
4019 #define stat_dec_compr_inode(inode) do { } while (0)
4020 #define stat_add_compr_blocks(inode, blocks) do { } while (0)
4021 #define stat_sub_compr_blocks(inode, blocks) do { } while (0)
4022 #define stat_update_max_atomic_write(inode) do { } while (0)
4023 #define stat_inc_volatile_write(inode) do { } while (0)
4024 #define stat_dec_volatile_write(inode) do { } while (0)
4025 #define stat_update_max_volatile_write(inode) do { } while (0)
4026 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0)
4027 #define stat_inc_seg_type(sbi, curseg) do { } while (0)
4028 #define stat_inc_block_count(sbi, curseg) do { } while (0)
4029 #define stat_inc_inplace_blocks(sbi) do { } while (0)
4030 #define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
4031 #define stat_inc_tot_blk_count(si, blks) do { } while (0)
4032 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
4033 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
4034
4035 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
4036 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
4037 static inline void __init f2fs_create_root_stats(void) { }
4038 static inline void f2fs_destroy_root_stats(void) { }
4039 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
4040 #endif
4041
4042 extern const struct file_operations f2fs_dir_operations;
4043 extern const struct file_operations f2fs_file_operations;
4044 extern const struct inode_operations f2fs_file_inode_operations;
4045 extern const struct address_space_operations f2fs_dblock_aops;
4046 extern const struct address_space_operations f2fs_node_aops;
4047 extern const struct address_space_operations f2fs_meta_aops;
4048 extern const struct inode_operations f2fs_dir_inode_operations;
4049 extern const struct inode_operations f2fs_symlink_inode_operations;
4050 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
4051 extern const struct inode_operations f2fs_special_inode_operations;
4052 extern struct kmem_cache *f2fs_inode_entry_slab;
4053
4054 /*
4055 * inline.c
4056 */
4057 bool f2fs_may_inline_data(struct inode *inode);
4058 bool f2fs_sanity_check_inline_data(struct inode *inode);
4059 bool f2fs_may_inline_dentry(struct inode *inode);
4060 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
4061 void f2fs_truncate_inline_inode(struct inode *inode,
4062 struct page *ipage, u64 from);
4063 int f2fs_read_inline_data(struct inode *inode, struct page *page);
4064 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
4065 int f2fs_convert_inline_inode(struct inode *inode);
4066 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
4067 int f2fs_write_inline_data(struct inode *inode, struct page *page);
4068 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
4069 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
4070 const struct f2fs_filename *fname,
4071 struct page **res_page);
4072 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
4073 struct page *ipage);
4074 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
4075 struct inode *inode, nid_t ino, umode_t mode);
4076 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
4077 struct page *page, struct inode *dir,
4078 struct inode *inode);
4079 bool f2fs_empty_inline_dir(struct inode *dir);
4080 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4081 struct fscrypt_str *fstr);
4082 int f2fs_inline_data_fiemap(struct inode *inode,
4083 struct fiemap_extent_info *fieinfo,
4084 __u64 start, __u64 len);
4085
4086 /*
4087 * shrinker.c
4088 */
4089 unsigned long f2fs_shrink_count(struct shrinker *shrink,
4090 struct shrink_control *sc);
4091 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
4092 struct shrink_control *sc);
4093 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
4094 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
4095
4096 /*
4097 * extent_cache.c
4098 */
4099 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
4100 struct rb_entry *cached_re, unsigned int ofs);
4101 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
4102 struct rb_root_cached *root,
4103 struct rb_node **parent,
4104 unsigned long long key, bool *left_most);
4105 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
4106 struct rb_root_cached *root,
4107 struct rb_node **parent,
4108 unsigned int ofs, bool *leftmost);
4109 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
4110 struct rb_entry *cached_re, unsigned int ofs,
4111 struct rb_entry **prev_entry, struct rb_entry **next_entry,
4112 struct rb_node ***insert_p, struct rb_node **insert_parent,
4113 bool force, bool *leftmost);
4114 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
4115 struct rb_root_cached *root, bool check_key);
4116 void f2fs_init_extent_tree(struct inode *inode);
4117 void f2fs_drop_extent_tree(struct inode *inode);
4118 void f2fs_destroy_extent_node(struct inode *inode);
4119 void f2fs_destroy_extent_tree(struct inode *inode);
4120 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
4121 int __init f2fs_create_extent_cache(void);
4122 void f2fs_destroy_extent_cache(void);
4123
4124 /* read extent cache ops */
4125 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
4126 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
4127 struct extent_info *ei);
4128 void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
4129 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
4130 pgoff_t fofs, block_t blkaddr, unsigned int len);
4131 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
4132 int nr_shrink);
4133
4134 /* block age extent cache ops */
4135 void f2fs_init_age_extent_tree(struct inode *inode);
4136 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
4137 struct extent_info *ei);
4138 void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
4139 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
4140 pgoff_t fofs, unsigned int len);
4141 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
4142 int nr_shrink);
4143
4144 /*
4145 * sysfs.c
4146 */
4147 int __init f2fs_init_sysfs(void);
4148 void f2fs_exit_sysfs(void);
4149 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
4150 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
4151
4152 /* verity.c */
4153 extern const struct fsverity_operations f2fs_verityops;
4154
4155 /*
4156 * crypto support
4157 */
4158 static inline bool f2fs_encrypted_file(struct inode *inode)
4159 {
4160 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
4161 }
4162
4163 static inline void f2fs_set_encrypted_inode(struct inode *inode)
4164 {
4165 #ifdef CONFIG_FS_ENCRYPTION
4166 file_set_encrypt(inode);
4167 f2fs_set_inode_flags(inode);
4168 #endif
4169 }
4170
4171 /*
4172 * Returns true if the reads of the inode's data need to undergo some
4173 * postprocessing step, like decryption, fs-verity verification or decompression.
4174 */
4175 static inline bool f2fs_post_read_required(struct inode *inode)
4176 {
4177 return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
4178 f2fs_compressed_file(inode);
4179 }
4180
4181 /*
4182 * compress.c
4183 */
4184 #ifdef CONFIG_F2FS_FS_COMPRESSION
4185 bool f2fs_is_compressed_page(struct page *page);
4186 struct page *f2fs_compress_control_page(struct page *page);
4187 int f2fs_prepare_compress_overwrite(struct inode *inode,
4188 struct page **pagep, pgoff_t index, void **fsdata);
4189 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
4190 pgoff_t index, unsigned copied);
4191 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
4192 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
4193 bool f2fs_is_compress_backend_ready(struct inode *inode);
4194 int f2fs_init_compress_mempool(void);
4195 void f2fs_destroy_compress_mempool(void);
4196 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
4197 void f2fs_end_read_compressed_page(struct page *page, bool failed,
4198 block_t blkaddr, bool in_task);
4199 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
4200 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
4201 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
4202 int f2fs_write_multi_pages(struct compress_ctx *cc,
4203 int *submitted,
4204 struct writeback_control *wbc,
4205 enum iostat_type io_type);
4206 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
4207 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
4208 pgoff_t fofs, block_t blkaddr,
4209 unsigned int llen, unsigned int c_len);
4210 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
4211 unsigned nr_pages, sector_t *last_block_in_bio,
4212 bool is_readahead, bool for_write);
4213 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
4214 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
4215 bool in_task);
4216 void f2fs_put_page_dic(struct page *page, bool in_task);
4217 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
4218 int f2fs_init_compress_ctx(struct compress_ctx *cc);
4219 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
4220 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
4221 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
4222 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
4223 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
4224 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
4225 int __init f2fs_init_compress_cache(void);
4226 void f2fs_destroy_compress_cache(void);
4227 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
4228 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
4229 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4230 nid_t ino, block_t blkaddr);
4231 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4232 block_t blkaddr);
4233 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
4234 #define inc_compr_inode_stat(inode) \
4235 do { \
4236 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
4237 sbi->compr_new_inode++; \
4238 } while (0)
4239 #define add_compr_block_stat(inode, blocks) \
4240 do { \
4241 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
4242 int diff = F2FS_I(inode)->i_cluster_size - blocks; \
4243 sbi->compr_written_block += blocks; \
4244 sbi->compr_saved_block += diff; \
4245 } while (0)
4246 #else
4247 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
4248 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
4249 {
4250 if (!f2fs_compressed_file(inode))
4251 return true;
4252 /* compression support is not compiled in */
4253 return false;
4254 }
4255 static inline struct page *f2fs_compress_control_page(struct page *page)
4256 {
4257 WARN_ON_ONCE(1);
4258 return ERR_PTR(-EINVAL);
4259 }
4260 static inline int f2fs_init_compress_mempool(void) { return 0; }
4261 static inline void f2fs_destroy_compress_mempool(void) { }
4262 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
4263 bool in_task) { }
4264 static inline void f2fs_end_read_compressed_page(struct page *page,
4265 bool failed, block_t blkaddr, bool in_task)
4266 {
4267 WARN_ON_ONCE(1);
4268 }
4269 static inline void f2fs_put_page_dic(struct page *page, bool in_task)
4270 {
4271 WARN_ON_ONCE(1);
4272 }
4273 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
4274 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
4275 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
4276 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4277 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4278 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4279 static inline void f2fs_destroy_compress_cache(void) { }
4280 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
4281 block_t blkaddr) { }
4282 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
4283 struct page *page, nid_t ino, block_t blkaddr) { }
4284 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
4285 struct page *page, block_t blkaddr) { return false; }
4286 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4287 nid_t ino) { }
4288 #define inc_compr_inode_stat(inode) do { } while (0)
4289 static inline void f2fs_update_read_extent_tree_range_compressed(
4290 struct inode *inode,
4291 pgoff_t fofs, block_t blkaddr,
4292 unsigned int llen, unsigned int c_len) { }
4293 #endif
4294
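/*
 * Initialise the per-inode compression context (algorithm, cluster size,
 * checksum/level flags) from the current mount options and mark the inode
 * as a compressed file. Returns -EOPNOTSUPP when compression support is
 * not built in.
 */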
4295 static inline int set_compress_context(struct inode *inode)
4296 {
4297 #ifdef CONFIG_F2FS_FS_COMPRESSION
4298 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4299
4300 F2FS_I(inode)->i_compress_algorithm =
4301 F2FS_OPTION(sbi).compress_algorithm;
4302 F2FS_I(inode)->i_log_cluster_size =
4303 F2FS_OPTION(sbi).compress_log_size;
4304 F2FS_I(inode)->i_compress_flag =
4305 F2FS_OPTION(sbi).compress_chksum ?
4306 1 << COMPRESS_CHKSUM : 0;
4307 F2FS_I(inode)->i_cluster_size =
4308 1 << F2FS_I(inode)->i_log_cluster_size;
4309 if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
4310 F2FS_OPTION(sbi).compress_level)
4311 F2FS_I(inode)->i_compress_flag |=
4312 F2FS_OPTION(sbi).compress_level <<
4313 COMPRESS_LEVEL_OFFSET;
4314 F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
4315 set_inode_flag(inode, FI_COMPRESSED_FILE);
4316 stat_inc_compr_inode(inode);
4317 inc_compr_inode_stat(inode);
4318 f2fs_mark_inode_dirty_sync(inode, true);
4319 return 0;
4320 #else
4321 return -EOPNOTSUPP;
4322 #endif
4323 }
4324
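/*
 * Turn compression off for an inode. This only succeeds while the regular
 * file has no blocks allocated yet; returns true when the inode ends up
 * (or already was) uncompressed.
 */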
4325 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4326 {
4327 struct f2fs_inode_info *fi = F2FS_I(inode);
4328
4329 if (!f2fs_compressed_file(inode))
4330 return true;
4331 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
4332 return false;
4333
4334 fi->i_flags &= ~F2FS_COMPR_FL;
4335 stat_dec_compr_inode(inode);
4336 clear_inode_flag(inode, FI_COMPRESSED_FILE);
4337 f2fs_mark_inode_dirty_sync(inode, true);
4338 return true;
4339 }
4340
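/*
 * Generate one f2fs_sb_has_<name>() predicate per on-disk feature flag,
 * e.g. F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to f2fs_sb_has_encrypt().
 */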
#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);

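/*
 * Zoned block device helper: reports whether the zone containing
 * @blkaddr on device @devi is a sequential-write zone, according to the
 * per-device zone bitmap (FDEV(devi).blkz_seq).
 */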
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

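/*
 * Discard capability helpers: zoned devices must always issue discards
 * (zone resets), other devices are probed per block device, and a
 * multi-device setup supports discard if any member device does.
 */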
static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
}

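/*
 * Compression is only allowed on regular files and directories, and is
 * refused for swapfiles and for pinned, atomic or volatile files.
 */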
static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) ||
		f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

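/*
 * Track how many blocks compression has saved for this inode.  The delta
 * is the uncompressed cluster size minus the blocks the cluster actually
 * occupies: e.g. with a 16-block cluster stored in 10 blocks,
 * i_compr_blocks is adjusted by 6.
 */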
static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

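/*
 * Direct I/O alignment helpers: block_unaligned_IO() is non-zero when
 * either the file position or the iovec buffers are not block-size
 * aligned; allow_outplace_dio() permits out-of-place DIO writes only in
 * LFS mode and only when the request is fully aligned.
 */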
static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}

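/*
 * Fall back from direct I/O to buffered I/O whenever DIO cannot be
 * served safely: fscrypt without DIO support, fsverity, compressed or
 * multi-device inodes, zoned devices, LFS writes that are unaligned or
 * have IO alignment enabled, and while checkpointing is disabled.
 */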
static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (!fscrypt_dio_supported(iocb, iter))
		return true;
	if (fsverity_active(inode))
		return true;
	if (f2fs_compressed_file(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * For a zoned block device, fall back from direct IO to buffered
	 * IO so that all IO can be serialized by the log-structured write.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
		return true;

	return false;
}

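/*
 * A page needs fs-verity verification only if the inode has verity
 * enabled and the page index falls within i_size.
 */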
static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

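/*
 * Quota is treated as journalled when the quota-inode feature is enabled
 * or when any user/group/project quota file was specified as a mount
 * option; without CONFIG_QUOTA this is always false.
 */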
static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */