// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
8*4882a593Smuzhiyun #include <linux/module.h>
9*4882a593Smuzhiyun #include <linux/init.h>
10*4882a593Smuzhiyun #include <linux/fs.h>
11*4882a593Smuzhiyun #include <linux/statfs.h>
12*4882a593Smuzhiyun #include <linux/buffer_head.h>
13*4882a593Smuzhiyun #include <linux/backing-dev.h>
14*4882a593Smuzhiyun #include <linux/kthread.h>
15*4882a593Smuzhiyun #include <linux/parser.h>
16*4882a593Smuzhiyun #include <linux/mount.h>
17*4882a593Smuzhiyun #include <linux/seq_file.h>
18*4882a593Smuzhiyun #include <linux/proc_fs.h>
19*4882a593Smuzhiyun #include <linux/random.h>
20*4882a593Smuzhiyun #include <linux/exportfs.h>
21*4882a593Smuzhiyun #include <linux/blkdev.h>
22*4882a593Smuzhiyun #include <linux/quotaops.h>
23*4882a593Smuzhiyun #include <linux/f2fs_fs.h>
24*4882a593Smuzhiyun #include <linux/sysfs.h>
25*4882a593Smuzhiyun #include <linux/quota.h>
26*4882a593Smuzhiyun #include <linux/unicode.h>
27*4882a593Smuzhiyun #include <linux/part_stat.h>
28*4882a593Smuzhiyun #include <linux/zstd.h>
29*4882a593Smuzhiyun #include <linux/lz4.h>
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include "f2fs.h"
32*4882a593Smuzhiyun #include "node.h"
33*4882a593Smuzhiyun #include "segment.h"
34*4882a593Smuzhiyun #include "xattr.h"
35*4882a593Smuzhiyun #include "gc.h"
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #define CREATE_TRACE_POINTS
38*4882a593Smuzhiyun #include <trace/events/f2fs.h>
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun static struct kmem_cache *f2fs_inode_cachep;
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FAULT_INJECTION
43*4882a593Smuzhiyun
/*
 * Human-readable names for each fault-injection point, indexed by the
 * FAULT_* enum. Used when reporting which fault type was injected.
 */
const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};
60*4882a593Smuzhiyun
f2fs_build_fault_attr(struct f2fs_sb_info * sbi,unsigned int rate,unsigned int type)61*4882a593Smuzhiyun void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
62*4882a593Smuzhiyun unsigned int type)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun if (rate) {
67*4882a593Smuzhiyun atomic_set(&ffi->inject_ops, 0);
68*4882a593Smuzhiyun ffi->inject_rate = rate;
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun if (type)
72*4882a593Smuzhiyun ffi->inject_type = type;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun if (!rate && !type)
75*4882a593Smuzhiyun memset(ffi, 0, sizeof(struct f2fs_fault_info));
76*4882a593Smuzhiyun }
77*4882a593Smuzhiyun #endif
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /* f2fs-wide shrinker description */
/*
 * f2fs-wide shrinker description: lets the MM reclaim f2fs caches
 * via f2fs_shrink_count()/f2fs_shrink_scan() under memory pressure.
 */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
85*4882a593Smuzhiyun
/*
 * Token identifiers for every supported mount option; each entry pairs
 * with a pattern string in f2fs_tokens below. Opt_err terminates
 * matching for unrecognized options.
 */
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_nogc_merge,
	Opt_memory_mode,
	Opt_age_extent_cache,
	Opt_err,
};
161*4882a593Smuzhiyun
/*
 * Mount-option pattern table consumed by match_token() in
 * parse_options(). Note "test_dummy_encryption" appears twice so it
 * matches both with and without a "=%s" argument, and the bare
 * "usrjquota="/"grpjquota="/"prjjquota=" forms (empty value) select
 * the Opt_off* tokens that clear a previously set quota file.
 */
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_atgc, "atgc"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_memory_mode, "memory=%s"},
	{Opt_age_extent_cache, "age_extent_cache"},
	{Opt_err, NULL},
};
238*4882a593Smuzhiyun
/*
 * printf-style logging helper for f2fs messages.
 *
 * Extracts the KERN_* level prefix embedded in @fmt (if any) and
 * re-emits it, prefixing the message body with "F2FS-fs (<dev>): "
 * so every log line identifies the originating filesystem instance.
 * The %pV specifier expands the caller's format/arguments in place.
 */
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun #ifdef CONFIG_UNICODE
/*
 * Known on-disk filename-encoding identifiers. The superblock stores
 * the magic; this table maps it to a name/version for the unicode
 * subsystem. Currently only UTF-8 12.1.0 is supported.
 */
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};
264*4882a593Smuzhiyun
/*
 * Look up the superblock's filename-encoding magic in
 * f2fs_sb_encoding_map. On a match, return 0 and fill in @encoding
 * (table entry) and @flags (the superblock's encoding flags);
 * return -EINVAL if the magic is unknown.
 */
static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++) {
		if (magic != f2fs_sb_encoding_map[i].magic)
			continue;

		*encoding = &f2fs_sb_encoding_map[i];
		*flags = le16_to_cpu(sb->s_encoding_flags);
		return 0;
	}

	return -EINVAL;
}
284*4882a593Smuzhiyun
/* slab cache for casefolded filename buffers (CONFIG_UNICODE only) */
struct kmem_cache *f2fs_cf_name_slab;

/* Allocate the casefolded-name slab cache; returns -ENOMEM on failure. */
static int __init f2fs_create_casefold_cache(void)
{
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
							F2FS_NAME_LEN);
	if (!f2fs_cf_name_slab)
		return -ENOMEM;
	return 0;
}

/* Tear down the casefolded-name slab cache (NULL-safe). */
static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
/* No-op stubs when the kernel is built without CONFIG_UNICODE. */
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif
303*4882a593Smuzhiyun
/*
 * Sanity-check the reserve_root mount option: clamp root's reserved
 * blocks to 12.5% of user blocks (or all non-reserved blocks, if that
 * is smaller), and warn when resuid/resgid were given without
 * reserve_root (they have no effect in that case).
 */
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count >> 3),
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 12.5% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit &&
			F2FS_OPTION(sbi).root_reserved_blocks > MIN_ROOT_RESERVED_BLOCKS) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}
328*4882a593Smuzhiyun
/*
 * For IO-aligned mode, reserve extra segments so a whole section can be
 * migrated even in the worst case of IO-unit-aligned valid blocks.
 * Returns 0 on success (or when IO alignment is off), -ENOSPC if the
 * filesystem lacks room for the additional reservation.
 */
static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in section in worst case */
	/*
	 * NOTE(review): if F2FS_IO_SIZE(sbi) ever exceeded sec_blks this
	 * would be 0 and the division below would trap — presumably ruled
	 * out by superblock validation; confirm.
	 */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
		 wanted_reserved_segments);

	return 0;
}
368*4882a593Smuzhiyun
adjust_unusable_cap_perc(struct f2fs_sb_info * sbi)369*4882a593Smuzhiyun static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
370*4882a593Smuzhiyun {
371*4882a593Smuzhiyun if (!F2FS_OPTION(sbi).unusable_cap_perc)
372*4882a593Smuzhiyun return;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
375*4882a593Smuzhiyun F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
376*4882a593Smuzhiyun else
377*4882a593Smuzhiyun F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
378*4882a593Smuzhiyun F2FS_OPTION(sbi).unusable_cap_perc;
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
381*4882a593Smuzhiyun F2FS_OPTION(sbi).unusable_cap,
382*4882a593Smuzhiyun F2FS_OPTION(sbi).unusable_cap_perc);
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun
/* Slab constructor: one-time VFS inode initialization for each object. */
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

/*
 * Handle usrjquota=/grpjquota=/prjjquota=<file> mount options: record
 * the journaled-quota file name for @qtype. Rejects changes while any
 * quota is loaded, and is a no-op when the QUOTA feature (quota inodes)
 * is enabled. Returns 0 on success or a negative errno.
 */
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		/* re-specifying the same name is allowed; a new one is not */
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	/* single cleanup point: free the duplicated name on all error paths */
	kfree(qname);
	return ret;
}
435*4882a593Smuzhiyun
f2fs_clear_qf_name(struct super_block * sb,int qtype)436*4882a593Smuzhiyun static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
437*4882a593Smuzhiyun {
438*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
439*4882a593Smuzhiyun
440*4882a593Smuzhiyun if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
441*4882a593Smuzhiyun f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
442*4882a593Smuzhiyun return -EINVAL;
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
445*4882a593Smuzhiyun F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
446*4882a593Smuzhiyun return 0;
447*4882a593Smuzhiyun }
448*4882a593Smuzhiyun
/*
 * Validate the combination of quota mount options after parsing.
 * Rejects project quota without the on-disk feature, mixing old-style
 * (usrquota/grpquota/prjquota) with journaled quota files, and
 * journaled quota without a jqfmt. Returns 0 if consistent, -1 on a
 * conflicting combination.
 */
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		/* journaled quota wins over the matching old-style option */
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		/* any old-style option left now is for a different qtype */
		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
493*4882a593Smuzhiyun #endif
494*4882a593Smuzhiyun
/*
 * Handle the test_dummy_encryption mount option (testing only).
 * Requires the encrypt feature; on remount it may only be re-specified
 * unchanged. Without CONFIG_FS_ENCRYPTION the option is accepted but
 * ignored with a warning. Returns 0 on success, -EINVAL otherwise.
 */
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
	int err;

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount. We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}
	err = fscrypt_set_test_dummy_encryption(
		sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
	f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
	return 0;
}
539*4882a593Smuzhiyun
540*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_COMPRESSION
541*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_LZ4
f2fs_set_lz4hc_level(struct f2fs_sb_info * sbi,const char * str)542*4882a593Smuzhiyun static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
543*4882a593Smuzhiyun {
544*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_LZ4HC
545*4882a593Smuzhiyun unsigned int level;
546*4882a593Smuzhiyun #endif
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun if (strlen(str) == 3) {
549*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_level = 0;
550*4882a593Smuzhiyun return 0;
551*4882a593Smuzhiyun }
552*4882a593Smuzhiyun
553*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_LZ4HC
554*4882a593Smuzhiyun str += 3;
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun if (str[0] != ':') {
557*4882a593Smuzhiyun f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
558*4882a593Smuzhiyun return -EINVAL;
559*4882a593Smuzhiyun }
560*4882a593Smuzhiyun if (kstrtouint(str + 1, 10, &level))
561*4882a593Smuzhiyun return -EINVAL;
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
564*4882a593Smuzhiyun f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
565*4882a593Smuzhiyun return -EINVAL;
566*4882a593Smuzhiyun }
567*4882a593Smuzhiyun
568*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_level = level;
569*4882a593Smuzhiyun return 0;
570*4882a593Smuzhiyun #else
571*4882a593Smuzhiyun f2fs_info(sbi, "kernel doesn't support lz4hc compression");
572*4882a593Smuzhiyun return -EINVAL;
573*4882a593Smuzhiyun #endif
574*4882a593Smuzhiyun }
575*4882a593Smuzhiyun #endif
576*4882a593Smuzhiyun
577*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_ZSTD
f2fs_set_zstd_level(struct f2fs_sb_info * sbi,const char * str)578*4882a593Smuzhiyun static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
579*4882a593Smuzhiyun {
580*4882a593Smuzhiyun unsigned int level;
581*4882a593Smuzhiyun int len = 4;
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun if (strlen(str) == len) {
584*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_level = 0;
585*4882a593Smuzhiyun return 0;
586*4882a593Smuzhiyun }
587*4882a593Smuzhiyun
588*4882a593Smuzhiyun str += len;
589*4882a593Smuzhiyun
590*4882a593Smuzhiyun if (str[0] != ':') {
591*4882a593Smuzhiyun f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
592*4882a593Smuzhiyun return -EINVAL;
593*4882a593Smuzhiyun }
594*4882a593Smuzhiyun if (kstrtouint(str + 1, 10, &level))
595*4882a593Smuzhiyun return -EINVAL;
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun if (!level || level > ZSTD_maxCLevel()) {
598*4882a593Smuzhiyun f2fs_info(sbi, "invalid zstd compress level: %d", level);
599*4882a593Smuzhiyun return -EINVAL;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_level = level;
603*4882a593Smuzhiyun return 0;
604*4882a593Smuzhiyun }
605*4882a593Smuzhiyun #endif
606*4882a593Smuzhiyun #endif
607*4882a593Smuzhiyun
parse_options(struct super_block * sb,char * options,bool is_remount)608*4882a593Smuzhiyun static int parse_options(struct super_block *sb, char *options, bool is_remount)
609*4882a593Smuzhiyun {
610*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
611*4882a593Smuzhiyun substring_t args[MAX_OPT_ARGS];
612*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_COMPRESSION
613*4882a593Smuzhiyun unsigned char (*ext)[F2FS_EXTENSION_LEN];
614*4882a593Smuzhiyun int ext_cnt;
615*4882a593Smuzhiyun #endif
616*4882a593Smuzhiyun char *p, *name;
617*4882a593Smuzhiyun int arg = 0;
618*4882a593Smuzhiyun kuid_t uid;
619*4882a593Smuzhiyun kgid_t gid;
620*4882a593Smuzhiyun int ret;
621*4882a593Smuzhiyun
622*4882a593Smuzhiyun if (!options)
623*4882a593Smuzhiyun goto default_check;
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun while ((p = strsep(&options, ",")) != NULL) {
626*4882a593Smuzhiyun int token;
627*4882a593Smuzhiyun
628*4882a593Smuzhiyun if (!*p)
629*4882a593Smuzhiyun continue;
630*4882a593Smuzhiyun /*
631*4882a593Smuzhiyun * Initialize args struct so we know whether arg was
632*4882a593Smuzhiyun * found; some options take optional arguments.
633*4882a593Smuzhiyun */
634*4882a593Smuzhiyun args[0].to = args[0].from = NULL;
635*4882a593Smuzhiyun token = match_token(p, f2fs_tokens, args);
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun switch (token) {
638*4882a593Smuzhiyun case Opt_gc_background:
639*4882a593Smuzhiyun name = match_strdup(&args[0]);
640*4882a593Smuzhiyun
641*4882a593Smuzhiyun if (!name)
642*4882a593Smuzhiyun return -ENOMEM;
643*4882a593Smuzhiyun if (!strcmp(name, "on")) {
644*4882a593Smuzhiyun F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
645*4882a593Smuzhiyun } else if (!strcmp(name, "off")) {
646*4882a593Smuzhiyun F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
647*4882a593Smuzhiyun } else if (!strcmp(name, "sync")) {
648*4882a593Smuzhiyun F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
649*4882a593Smuzhiyun } else {
650*4882a593Smuzhiyun kfree(name);
651*4882a593Smuzhiyun return -EINVAL;
652*4882a593Smuzhiyun }
653*4882a593Smuzhiyun kfree(name);
654*4882a593Smuzhiyun break;
655*4882a593Smuzhiyun case Opt_disable_roll_forward:
656*4882a593Smuzhiyun set_opt(sbi, DISABLE_ROLL_FORWARD);
657*4882a593Smuzhiyun break;
658*4882a593Smuzhiyun case Opt_norecovery:
659*4882a593Smuzhiyun /* this option mounts f2fs with ro */
660*4882a593Smuzhiyun set_opt(sbi, NORECOVERY);
661*4882a593Smuzhiyun if (!f2fs_readonly(sb))
662*4882a593Smuzhiyun return -EINVAL;
663*4882a593Smuzhiyun break;
664*4882a593Smuzhiyun case Opt_discard:
665*4882a593Smuzhiyun set_opt(sbi, DISCARD);
666*4882a593Smuzhiyun break;
667*4882a593Smuzhiyun case Opt_nodiscard:
668*4882a593Smuzhiyun if (f2fs_sb_has_blkzoned(sbi)) {
669*4882a593Smuzhiyun f2fs_warn(sbi, "discard is required for zoned block devices");
670*4882a593Smuzhiyun return -EINVAL;
671*4882a593Smuzhiyun }
672*4882a593Smuzhiyun clear_opt(sbi, DISCARD);
673*4882a593Smuzhiyun break;
674*4882a593Smuzhiyun case Opt_noheap:
675*4882a593Smuzhiyun set_opt(sbi, NOHEAP);
676*4882a593Smuzhiyun break;
677*4882a593Smuzhiyun case Opt_heap:
678*4882a593Smuzhiyun clear_opt(sbi, NOHEAP);
679*4882a593Smuzhiyun break;
680*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_XATTR
681*4882a593Smuzhiyun case Opt_user_xattr:
682*4882a593Smuzhiyun set_opt(sbi, XATTR_USER);
683*4882a593Smuzhiyun break;
684*4882a593Smuzhiyun case Opt_nouser_xattr:
685*4882a593Smuzhiyun clear_opt(sbi, XATTR_USER);
686*4882a593Smuzhiyun break;
687*4882a593Smuzhiyun case Opt_inline_xattr:
688*4882a593Smuzhiyun set_opt(sbi, INLINE_XATTR);
689*4882a593Smuzhiyun break;
690*4882a593Smuzhiyun case Opt_noinline_xattr:
691*4882a593Smuzhiyun clear_opt(sbi, INLINE_XATTR);
692*4882a593Smuzhiyun break;
693*4882a593Smuzhiyun case Opt_inline_xattr_size:
694*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
695*4882a593Smuzhiyun return -EINVAL;
696*4882a593Smuzhiyun set_opt(sbi, INLINE_XATTR_SIZE);
697*4882a593Smuzhiyun F2FS_OPTION(sbi).inline_xattr_size = arg;
698*4882a593Smuzhiyun break;
699*4882a593Smuzhiyun #else
700*4882a593Smuzhiyun case Opt_user_xattr:
701*4882a593Smuzhiyun f2fs_info(sbi, "user_xattr options not supported");
702*4882a593Smuzhiyun break;
703*4882a593Smuzhiyun case Opt_nouser_xattr:
704*4882a593Smuzhiyun f2fs_info(sbi, "nouser_xattr options not supported");
705*4882a593Smuzhiyun break;
706*4882a593Smuzhiyun case Opt_inline_xattr:
707*4882a593Smuzhiyun f2fs_info(sbi, "inline_xattr options not supported");
708*4882a593Smuzhiyun break;
709*4882a593Smuzhiyun case Opt_noinline_xattr:
710*4882a593Smuzhiyun f2fs_info(sbi, "noinline_xattr options not supported");
711*4882a593Smuzhiyun break;
712*4882a593Smuzhiyun #endif
713*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_POSIX_ACL
714*4882a593Smuzhiyun case Opt_acl:
715*4882a593Smuzhiyun set_opt(sbi, POSIX_ACL);
716*4882a593Smuzhiyun break;
717*4882a593Smuzhiyun case Opt_noacl:
718*4882a593Smuzhiyun clear_opt(sbi, POSIX_ACL);
719*4882a593Smuzhiyun break;
720*4882a593Smuzhiyun #else
721*4882a593Smuzhiyun case Opt_acl:
722*4882a593Smuzhiyun f2fs_info(sbi, "acl options not supported");
723*4882a593Smuzhiyun break;
724*4882a593Smuzhiyun case Opt_noacl:
725*4882a593Smuzhiyun f2fs_info(sbi, "noacl options not supported");
726*4882a593Smuzhiyun break;
727*4882a593Smuzhiyun #endif
728*4882a593Smuzhiyun case Opt_active_logs:
729*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
730*4882a593Smuzhiyun return -EINVAL;
731*4882a593Smuzhiyun if (arg != 2 && arg != 4 &&
732*4882a593Smuzhiyun arg != NR_CURSEG_PERSIST_TYPE)
733*4882a593Smuzhiyun return -EINVAL;
734*4882a593Smuzhiyun F2FS_OPTION(sbi).active_logs = arg;
735*4882a593Smuzhiyun break;
736*4882a593Smuzhiyun case Opt_disable_ext_identify:
737*4882a593Smuzhiyun set_opt(sbi, DISABLE_EXT_IDENTIFY);
738*4882a593Smuzhiyun break;
739*4882a593Smuzhiyun case Opt_inline_data:
740*4882a593Smuzhiyun set_opt(sbi, INLINE_DATA);
741*4882a593Smuzhiyun break;
742*4882a593Smuzhiyun case Opt_inline_dentry:
743*4882a593Smuzhiyun set_opt(sbi, INLINE_DENTRY);
744*4882a593Smuzhiyun break;
745*4882a593Smuzhiyun case Opt_noinline_dentry:
746*4882a593Smuzhiyun clear_opt(sbi, INLINE_DENTRY);
747*4882a593Smuzhiyun break;
748*4882a593Smuzhiyun case Opt_flush_merge:
749*4882a593Smuzhiyun set_opt(sbi, FLUSH_MERGE);
750*4882a593Smuzhiyun break;
751*4882a593Smuzhiyun case Opt_noflush_merge:
752*4882a593Smuzhiyun clear_opt(sbi, FLUSH_MERGE);
753*4882a593Smuzhiyun break;
754*4882a593Smuzhiyun case Opt_nobarrier:
755*4882a593Smuzhiyun set_opt(sbi, NOBARRIER);
756*4882a593Smuzhiyun break;
757*4882a593Smuzhiyun case Opt_fastboot:
758*4882a593Smuzhiyun set_opt(sbi, FASTBOOT);
759*4882a593Smuzhiyun break;
760*4882a593Smuzhiyun case Opt_extent_cache:
761*4882a593Smuzhiyun set_opt(sbi, READ_EXTENT_CACHE);
762*4882a593Smuzhiyun break;
763*4882a593Smuzhiyun case Opt_noextent_cache:
764*4882a593Smuzhiyun clear_opt(sbi, READ_EXTENT_CACHE);
765*4882a593Smuzhiyun break;
766*4882a593Smuzhiyun case Opt_noinline_data:
767*4882a593Smuzhiyun clear_opt(sbi, INLINE_DATA);
768*4882a593Smuzhiyun break;
769*4882a593Smuzhiyun case Opt_data_flush:
770*4882a593Smuzhiyun set_opt(sbi, DATA_FLUSH);
771*4882a593Smuzhiyun break;
772*4882a593Smuzhiyun case Opt_reserve_root:
773*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
774*4882a593Smuzhiyun return -EINVAL;
775*4882a593Smuzhiyun if (test_opt(sbi, RESERVE_ROOT)) {
776*4882a593Smuzhiyun f2fs_info(sbi, "Preserve previous reserve_root=%u",
777*4882a593Smuzhiyun F2FS_OPTION(sbi).root_reserved_blocks);
778*4882a593Smuzhiyun } else {
779*4882a593Smuzhiyun F2FS_OPTION(sbi).root_reserved_blocks = arg;
780*4882a593Smuzhiyun set_opt(sbi, RESERVE_ROOT);
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun break;
783*4882a593Smuzhiyun case Opt_resuid:
784*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
785*4882a593Smuzhiyun return -EINVAL;
786*4882a593Smuzhiyun uid = make_kuid(current_user_ns(), arg);
787*4882a593Smuzhiyun if (!uid_valid(uid)) {
788*4882a593Smuzhiyun f2fs_err(sbi, "Invalid uid value %d", arg);
789*4882a593Smuzhiyun return -EINVAL;
790*4882a593Smuzhiyun }
791*4882a593Smuzhiyun F2FS_OPTION(sbi).s_resuid = uid;
792*4882a593Smuzhiyun break;
793*4882a593Smuzhiyun case Opt_resgid:
794*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
795*4882a593Smuzhiyun return -EINVAL;
796*4882a593Smuzhiyun gid = make_kgid(current_user_ns(), arg);
797*4882a593Smuzhiyun if (!gid_valid(gid)) {
798*4882a593Smuzhiyun f2fs_err(sbi, "Invalid gid value %d", arg);
799*4882a593Smuzhiyun return -EINVAL;
800*4882a593Smuzhiyun }
801*4882a593Smuzhiyun F2FS_OPTION(sbi).s_resgid = gid;
802*4882a593Smuzhiyun break;
803*4882a593Smuzhiyun case Opt_mode:
804*4882a593Smuzhiyun name = match_strdup(&args[0]);
805*4882a593Smuzhiyun
806*4882a593Smuzhiyun if (!name)
807*4882a593Smuzhiyun return -ENOMEM;
808*4882a593Smuzhiyun if (!strcmp(name, "adaptive")) {
809*4882a593Smuzhiyun if (f2fs_sb_has_blkzoned(sbi)) {
810*4882a593Smuzhiyun f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
811*4882a593Smuzhiyun kfree(name);
812*4882a593Smuzhiyun return -EINVAL;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
815*4882a593Smuzhiyun } else if (!strcmp(name, "lfs")) {
816*4882a593Smuzhiyun F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
817*4882a593Smuzhiyun } else {
818*4882a593Smuzhiyun kfree(name);
819*4882a593Smuzhiyun return -EINVAL;
820*4882a593Smuzhiyun }
821*4882a593Smuzhiyun kfree(name);
822*4882a593Smuzhiyun break;
823*4882a593Smuzhiyun case Opt_io_size_bits:
824*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
825*4882a593Smuzhiyun return -EINVAL;
826*4882a593Smuzhiyun if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
827*4882a593Smuzhiyun f2fs_warn(sbi, "Not support %d, larger than %d",
828*4882a593Smuzhiyun 1 << arg, BIO_MAX_PAGES);
829*4882a593Smuzhiyun return -EINVAL;
830*4882a593Smuzhiyun }
831*4882a593Smuzhiyun F2FS_OPTION(sbi).write_io_size_bits = arg;
832*4882a593Smuzhiyun break;
833*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FAULT_INJECTION
834*4882a593Smuzhiyun case Opt_fault_injection:
835*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
836*4882a593Smuzhiyun return -EINVAL;
837*4882a593Smuzhiyun f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
838*4882a593Smuzhiyun set_opt(sbi, FAULT_INJECTION);
839*4882a593Smuzhiyun break;
840*4882a593Smuzhiyun
841*4882a593Smuzhiyun case Opt_fault_type:
842*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
843*4882a593Smuzhiyun return -EINVAL;
844*4882a593Smuzhiyun f2fs_build_fault_attr(sbi, 0, arg);
845*4882a593Smuzhiyun set_opt(sbi, FAULT_INJECTION);
846*4882a593Smuzhiyun break;
847*4882a593Smuzhiyun #else
848*4882a593Smuzhiyun case Opt_fault_injection:
849*4882a593Smuzhiyun f2fs_info(sbi, "fault_injection options not supported");
850*4882a593Smuzhiyun break;
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun case Opt_fault_type:
853*4882a593Smuzhiyun f2fs_info(sbi, "fault_type options not supported");
854*4882a593Smuzhiyun break;
855*4882a593Smuzhiyun #endif
856*4882a593Smuzhiyun case Opt_lazytime:
857*4882a593Smuzhiyun sb->s_flags |= SB_LAZYTIME;
858*4882a593Smuzhiyun break;
859*4882a593Smuzhiyun case Opt_nolazytime:
860*4882a593Smuzhiyun sb->s_flags &= ~SB_LAZYTIME;
861*4882a593Smuzhiyun break;
862*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
863*4882a593Smuzhiyun case Opt_quota:
864*4882a593Smuzhiyun case Opt_usrquota:
865*4882a593Smuzhiyun set_opt(sbi, USRQUOTA);
866*4882a593Smuzhiyun break;
867*4882a593Smuzhiyun case Opt_grpquota:
868*4882a593Smuzhiyun set_opt(sbi, GRPQUOTA);
869*4882a593Smuzhiyun break;
870*4882a593Smuzhiyun case Opt_prjquota:
871*4882a593Smuzhiyun set_opt(sbi, PRJQUOTA);
872*4882a593Smuzhiyun break;
873*4882a593Smuzhiyun case Opt_usrjquota:
874*4882a593Smuzhiyun ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
875*4882a593Smuzhiyun if (ret)
876*4882a593Smuzhiyun return ret;
877*4882a593Smuzhiyun break;
878*4882a593Smuzhiyun case Opt_grpjquota:
879*4882a593Smuzhiyun ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
880*4882a593Smuzhiyun if (ret)
881*4882a593Smuzhiyun return ret;
882*4882a593Smuzhiyun break;
883*4882a593Smuzhiyun case Opt_prjjquota:
884*4882a593Smuzhiyun ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
885*4882a593Smuzhiyun if (ret)
886*4882a593Smuzhiyun return ret;
887*4882a593Smuzhiyun break;
888*4882a593Smuzhiyun case Opt_offusrjquota:
889*4882a593Smuzhiyun ret = f2fs_clear_qf_name(sb, USRQUOTA);
890*4882a593Smuzhiyun if (ret)
891*4882a593Smuzhiyun return ret;
892*4882a593Smuzhiyun break;
893*4882a593Smuzhiyun case Opt_offgrpjquota:
894*4882a593Smuzhiyun ret = f2fs_clear_qf_name(sb, GRPQUOTA);
895*4882a593Smuzhiyun if (ret)
896*4882a593Smuzhiyun return ret;
897*4882a593Smuzhiyun break;
898*4882a593Smuzhiyun case Opt_offprjjquota:
899*4882a593Smuzhiyun ret = f2fs_clear_qf_name(sb, PRJQUOTA);
900*4882a593Smuzhiyun if (ret)
901*4882a593Smuzhiyun return ret;
902*4882a593Smuzhiyun break;
903*4882a593Smuzhiyun case Opt_jqfmt_vfsold:
904*4882a593Smuzhiyun F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
905*4882a593Smuzhiyun break;
906*4882a593Smuzhiyun case Opt_jqfmt_vfsv0:
907*4882a593Smuzhiyun F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
908*4882a593Smuzhiyun break;
909*4882a593Smuzhiyun case Opt_jqfmt_vfsv1:
910*4882a593Smuzhiyun F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
911*4882a593Smuzhiyun break;
912*4882a593Smuzhiyun case Opt_noquota:
913*4882a593Smuzhiyun clear_opt(sbi, QUOTA);
914*4882a593Smuzhiyun clear_opt(sbi, USRQUOTA);
915*4882a593Smuzhiyun clear_opt(sbi, GRPQUOTA);
916*4882a593Smuzhiyun clear_opt(sbi, PRJQUOTA);
917*4882a593Smuzhiyun break;
918*4882a593Smuzhiyun #else
919*4882a593Smuzhiyun case Opt_quota:
920*4882a593Smuzhiyun case Opt_usrquota:
921*4882a593Smuzhiyun case Opt_grpquota:
922*4882a593Smuzhiyun case Opt_prjquota:
923*4882a593Smuzhiyun case Opt_usrjquota:
924*4882a593Smuzhiyun case Opt_grpjquota:
925*4882a593Smuzhiyun case Opt_prjjquota:
926*4882a593Smuzhiyun case Opt_offusrjquota:
927*4882a593Smuzhiyun case Opt_offgrpjquota:
928*4882a593Smuzhiyun case Opt_offprjjquota:
929*4882a593Smuzhiyun case Opt_jqfmt_vfsold:
930*4882a593Smuzhiyun case Opt_jqfmt_vfsv0:
931*4882a593Smuzhiyun case Opt_jqfmt_vfsv1:
932*4882a593Smuzhiyun case Opt_noquota:
933*4882a593Smuzhiyun f2fs_info(sbi, "quota operations not supported");
934*4882a593Smuzhiyun break;
935*4882a593Smuzhiyun #endif
936*4882a593Smuzhiyun case Opt_whint:
937*4882a593Smuzhiyun name = match_strdup(&args[0]);
938*4882a593Smuzhiyun if (!name)
939*4882a593Smuzhiyun return -ENOMEM;
940*4882a593Smuzhiyun if (!strcmp(name, "user-based")) {
941*4882a593Smuzhiyun F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
942*4882a593Smuzhiyun } else if (!strcmp(name, "off")) {
943*4882a593Smuzhiyun F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
944*4882a593Smuzhiyun } else if (!strcmp(name, "fs-based")) {
945*4882a593Smuzhiyun F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
946*4882a593Smuzhiyun } else {
947*4882a593Smuzhiyun kfree(name);
948*4882a593Smuzhiyun return -EINVAL;
949*4882a593Smuzhiyun }
950*4882a593Smuzhiyun kfree(name);
951*4882a593Smuzhiyun break;
952*4882a593Smuzhiyun case Opt_alloc:
953*4882a593Smuzhiyun name = match_strdup(&args[0]);
954*4882a593Smuzhiyun if (!name)
955*4882a593Smuzhiyun return -ENOMEM;
956*4882a593Smuzhiyun
957*4882a593Smuzhiyun if (!strcmp(name, "default")) {
958*4882a593Smuzhiyun F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
959*4882a593Smuzhiyun } else if (!strcmp(name, "reuse")) {
960*4882a593Smuzhiyun F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
961*4882a593Smuzhiyun } else {
962*4882a593Smuzhiyun kfree(name);
963*4882a593Smuzhiyun return -EINVAL;
964*4882a593Smuzhiyun }
965*4882a593Smuzhiyun kfree(name);
966*4882a593Smuzhiyun break;
967*4882a593Smuzhiyun case Opt_fsync:
968*4882a593Smuzhiyun name = match_strdup(&args[0]);
969*4882a593Smuzhiyun if (!name)
970*4882a593Smuzhiyun return -ENOMEM;
971*4882a593Smuzhiyun if (!strcmp(name, "posix")) {
972*4882a593Smuzhiyun F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
973*4882a593Smuzhiyun } else if (!strcmp(name, "strict")) {
974*4882a593Smuzhiyun F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
975*4882a593Smuzhiyun } else if (!strcmp(name, "nobarrier")) {
976*4882a593Smuzhiyun F2FS_OPTION(sbi).fsync_mode =
977*4882a593Smuzhiyun FSYNC_MODE_NOBARRIER;
978*4882a593Smuzhiyun } else {
979*4882a593Smuzhiyun kfree(name);
980*4882a593Smuzhiyun return -EINVAL;
981*4882a593Smuzhiyun }
982*4882a593Smuzhiyun kfree(name);
983*4882a593Smuzhiyun break;
984*4882a593Smuzhiyun case Opt_test_dummy_encryption:
985*4882a593Smuzhiyun ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
986*4882a593Smuzhiyun is_remount);
987*4882a593Smuzhiyun if (ret)
988*4882a593Smuzhiyun return ret;
989*4882a593Smuzhiyun break;
990*4882a593Smuzhiyun case Opt_inlinecrypt:
991*4882a593Smuzhiyun #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
992*4882a593Smuzhiyun sb->s_flags |= SB_INLINECRYPT;
993*4882a593Smuzhiyun #else
994*4882a593Smuzhiyun f2fs_info(sbi, "inline encryption not supported");
995*4882a593Smuzhiyun #endif
996*4882a593Smuzhiyun break;
997*4882a593Smuzhiyun case Opt_checkpoint_disable_cap_perc:
998*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
999*4882a593Smuzhiyun return -EINVAL;
1000*4882a593Smuzhiyun if (arg < 0 || arg > 100)
1001*4882a593Smuzhiyun return -EINVAL;
1002*4882a593Smuzhiyun F2FS_OPTION(sbi).unusable_cap_perc = arg;
1003*4882a593Smuzhiyun set_opt(sbi, DISABLE_CHECKPOINT);
1004*4882a593Smuzhiyun break;
1005*4882a593Smuzhiyun case Opt_checkpoint_disable_cap:
1006*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
1007*4882a593Smuzhiyun return -EINVAL;
1008*4882a593Smuzhiyun F2FS_OPTION(sbi).unusable_cap = arg;
1009*4882a593Smuzhiyun set_opt(sbi, DISABLE_CHECKPOINT);
1010*4882a593Smuzhiyun break;
1011*4882a593Smuzhiyun case Opt_checkpoint_disable:
1012*4882a593Smuzhiyun set_opt(sbi, DISABLE_CHECKPOINT);
1013*4882a593Smuzhiyun break;
1014*4882a593Smuzhiyun case Opt_checkpoint_enable:
1015*4882a593Smuzhiyun clear_opt(sbi, DISABLE_CHECKPOINT);
1016*4882a593Smuzhiyun break;
1017*4882a593Smuzhiyun case Opt_checkpoint_merge:
1018*4882a593Smuzhiyun set_opt(sbi, MERGE_CHECKPOINT);
1019*4882a593Smuzhiyun break;
1020*4882a593Smuzhiyun case Opt_nocheckpoint_merge:
1021*4882a593Smuzhiyun clear_opt(sbi, MERGE_CHECKPOINT);
1022*4882a593Smuzhiyun break;
1023*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_COMPRESSION
1024*4882a593Smuzhiyun case Opt_compress_algorithm:
1025*4882a593Smuzhiyun if (!f2fs_sb_has_compression(sbi)) {
1026*4882a593Smuzhiyun f2fs_info(sbi, "Image doesn't support compression");
1027*4882a593Smuzhiyun break;
1028*4882a593Smuzhiyun }
1029*4882a593Smuzhiyun name = match_strdup(&args[0]);
1030*4882a593Smuzhiyun if (!name)
1031*4882a593Smuzhiyun return -ENOMEM;
1032*4882a593Smuzhiyun if (!strcmp(name, "lzo")) {
1033*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_LZO
1034*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_level = 0;
1035*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_algorithm =
1036*4882a593Smuzhiyun COMPRESS_LZO;
1037*4882a593Smuzhiyun #else
1038*4882a593Smuzhiyun f2fs_info(sbi, "kernel doesn't support lzo compression");
1039*4882a593Smuzhiyun #endif
1040*4882a593Smuzhiyun } else if (!strncmp(name, "lz4", 3)) {
1041*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_LZ4
1042*4882a593Smuzhiyun ret = f2fs_set_lz4hc_level(sbi, name);
1043*4882a593Smuzhiyun if (ret) {
1044*4882a593Smuzhiyun kfree(name);
1045*4882a593Smuzhiyun return -EINVAL;
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_algorithm =
1048*4882a593Smuzhiyun COMPRESS_LZ4;
1049*4882a593Smuzhiyun #else
1050*4882a593Smuzhiyun f2fs_info(sbi, "kernel doesn't support lz4 compression");
1051*4882a593Smuzhiyun #endif
1052*4882a593Smuzhiyun } else if (!strncmp(name, "zstd", 4)) {
1053*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_ZSTD
1054*4882a593Smuzhiyun ret = f2fs_set_zstd_level(sbi, name);
1055*4882a593Smuzhiyun if (ret) {
1056*4882a593Smuzhiyun kfree(name);
1057*4882a593Smuzhiyun return -EINVAL;
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_algorithm =
1060*4882a593Smuzhiyun COMPRESS_ZSTD;
1061*4882a593Smuzhiyun #else
1062*4882a593Smuzhiyun f2fs_info(sbi, "kernel doesn't support zstd compression");
1063*4882a593Smuzhiyun #endif
1064*4882a593Smuzhiyun } else if (!strcmp(name, "lzo-rle")) {
1065*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_LZORLE
1066*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_level = 0;
1067*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_algorithm =
1068*4882a593Smuzhiyun COMPRESS_LZORLE;
1069*4882a593Smuzhiyun #else
1070*4882a593Smuzhiyun f2fs_info(sbi, "kernel doesn't support lzorle compression");
1071*4882a593Smuzhiyun #endif
1072*4882a593Smuzhiyun } else {
1073*4882a593Smuzhiyun kfree(name);
1074*4882a593Smuzhiyun return -EINVAL;
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun kfree(name);
1077*4882a593Smuzhiyun break;
1078*4882a593Smuzhiyun case Opt_compress_log_size:
1079*4882a593Smuzhiyun if (!f2fs_sb_has_compression(sbi)) {
1080*4882a593Smuzhiyun f2fs_info(sbi, "Image doesn't support compression");
1081*4882a593Smuzhiyun break;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun if (args->from && match_int(args, &arg))
1084*4882a593Smuzhiyun return -EINVAL;
1085*4882a593Smuzhiyun if (arg < MIN_COMPRESS_LOG_SIZE ||
1086*4882a593Smuzhiyun arg > MAX_COMPRESS_LOG_SIZE) {
1087*4882a593Smuzhiyun f2fs_err(sbi,
1088*4882a593Smuzhiyun "Compress cluster log size is out of range");
1089*4882a593Smuzhiyun return -EINVAL;
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_log_size = arg;
1092*4882a593Smuzhiyun break;
1093*4882a593Smuzhiyun case Opt_compress_extension:
1094*4882a593Smuzhiyun if (!f2fs_sb_has_compression(sbi)) {
1095*4882a593Smuzhiyun f2fs_info(sbi, "Image doesn't support compression");
1096*4882a593Smuzhiyun break;
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun name = match_strdup(&args[0]);
1099*4882a593Smuzhiyun if (!name)
1100*4882a593Smuzhiyun return -ENOMEM;
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun ext = F2FS_OPTION(sbi).extensions;
1103*4882a593Smuzhiyun ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun if (strlen(name) >= F2FS_EXTENSION_LEN ||
1106*4882a593Smuzhiyun ext_cnt >= COMPRESS_EXT_NUM) {
1107*4882a593Smuzhiyun f2fs_err(sbi,
1108*4882a593Smuzhiyun "invalid extension length/number");
1109*4882a593Smuzhiyun kfree(name);
1110*4882a593Smuzhiyun return -EINVAL;
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun strcpy(ext[ext_cnt], name);
1114*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_ext_cnt++;
1115*4882a593Smuzhiyun kfree(name);
1116*4882a593Smuzhiyun break;
1117*4882a593Smuzhiyun case Opt_compress_chksum:
1118*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_chksum = true;
1119*4882a593Smuzhiyun break;
1120*4882a593Smuzhiyun case Opt_compress_mode:
1121*4882a593Smuzhiyun name = match_strdup(&args[0]);
1122*4882a593Smuzhiyun if (!name)
1123*4882a593Smuzhiyun return -ENOMEM;
1124*4882a593Smuzhiyun if (!strcmp(name, "fs")) {
1125*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
1126*4882a593Smuzhiyun } else if (!strcmp(name, "user")) {
1127*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
1128*4882a593Smuzhiyun } else {
1129*4882a593Smuzhiyun kfree(name);
1130*4882a593Smuzhiyun return -EINVAL;
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun kfree(name);
1133*4882a593Smuzhiyun break;
1134*4882a593Smuzhiyun case Opt_compress_cache:
1135*4882a593Smuzhiyun set_opt(sbi, COMPRESS_CACHE);
1136*4882a593Smuzhiyun break;
1137*4882a593Smuzhiyun #else
1138*4882a593Smuzhiyun case Opt_compress_algorithm:
1139*4882a593Smuzhiyun case Opt_compress_log_size:
1140*4882a593Smuzhiyun case Opt_compress_extension:
1141*4882a593Smuzhiyun case Opt_compress_chksum:
1142*4882a593Smuzhiyun case Opt_compress_mode:
1143*4882a593Smuzhiyun case Opt_compress_cache:
1144*4882a593Smuzhiyun f2fs_info(sbi, "compression options not supported");
1145*4882a593Smuzhiyun break;
1146*4882a593Smuzhiyun #endif
1147*4882a593Smuzhiyun case Opt_atgc:
1148*4882a593Smuzhiyun set_opt(sbi, ATGC);
1149*4882a593Smuzhiyun break;
1150*4882a593Smuzhiyun case Opt_gc_merge:
1151*4882a593Smuzhiyun set_opt(sbi, GC_MERGE);
1152*4882a593Smuzhiyun break;
1153*4882a593Smuzhiyun case Opt_nogc_merge:
1154*4882a593Smuzhiyun clear_opt(sbi, GC_MERGE);
1155*4882a593Smuzhiyun break;
1156*4882a593Smuzhiyun case Opt_age_extent_cache:
1157*4882a593Smuzhiyun set_opt(sbi, AGE_EXTENT_CACHE);
1158*4882a593Smuzhiyun break;
1159*4882a593Smuzhiyun case Opt_memory_mode:
1160*4882a593Smuzhiyun name = match_strdup(&args[0]);
1161*4882a593Smuzhiyun if (!name)
1162*4882a593Smuzhiyun return -ENOMEM;
1163*4882a593Smuzhiyun if (!strcmp(name, "normal")) {
1164*4882a593Smuzhiyun F2FS_OPTION(sbi).memory_mode =
1165*4882a593Smuzhiyun MEMORY_MODE_NORMAL;
1166*4882a593Smuzhiyun } else if (!strcmp(name, "low")) {
1167*4882a593Smuzhiyun F2FS_OPTION(sbi).memory_mode =
1168*4882a593Smuzhiyun MEMORY_MODE_LOW;
1169*4882a593Smuzhiyun } else {
1170*4882a593Smuzhiyun kfree(name);
1171*4882a593Smuzhiyun return -EINVAL;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun kfree(name);
1174*4882a593Smuzhiyun break;
1175*4882a593Smuzhiyun default:
1176*4882a593Smuzhiyun f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
1177*4882a593Smuzhiyun p);
1178*4882a593Smuzhiyun return -EINVAL;
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun default_check:
1182*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
1183*4882a593Smuzhiyun if (f2fs_check_quota_options(sbi))
1184*4882a593Smuzhiyun return -EINVAL;
1185*4882a593Smuzhiyun #else
1186*4882a593Smuzhiyun if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
1187*4882a593Smuzhiyun f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1188*4882a593Smuzhiyun return -EINVAL;
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
1191*4882a593Smuzhiyun f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1192*4882a593Smuzhiyun return -EINVAL;
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun #endif
1195*4882a593Smuzhiyun #ifndef CONFIG_UNICODE
1196*4882a593Smuzhiyun if (f2fs_sb_has_casefold(sbi)) {
1197*4882a593Smuzhiyun f2fs_err(sbi,
1198*4882a593Smuzhiyun "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
1199*4882a593Smuzhiyun return -EINVAL;
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun #endif
1202*4882a593Smuzhiyun /*
1203*4882a593Smuzhiyun * The BLKZONED feature indicates that the drive was formatted with
1204*4882a593Smuzhiyun * zone alignment optimization. This is optional for host-aware
1205*4882a593Smuzhiyun * devices, but mandatory for host-managed zoned block devices.
1206*4882a593Smuzhiyun */
1207*4882a593Smuzhiyun #ifndef CONFIG_BLK_DEV_ZONED
1208*4882a593Smuzhiyun if (f2fs_sb_has_blkzoned(sbi)) {
1209*4882a593Smuzhiyun f2fs_err(sbi, "Zoned block device support is not enabled");
1210*4882a593Smuzhiyun return -EINVAL;
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun #endif
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
1215*4882a593Smuzhiyun f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
1216*4882a593Smuzhiyun F2FS_IO_SIZE_KB(sbi));
1217*4882a593Smuzhiyun return -EINVAL;
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun if (test_opt(sbi, INLINE_XATTR_SIZE)) {
1221*4882a593Smuzhiyun int min_size, max_size;
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun if (!f2fs_sb_has_extra_attr(sbi) ||
1224*4882a593Smuzhiyun !f2fs_sb_has_flexible_inline_xattr(sbi)) {
1225*4882a593Smuzhiyun f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
1226*4882a593Smuzhiyun return -EINVAL;
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun if (!test_opt(sbi, INLINE_XATTR)) {
1229*4882a593Smuzhiyun f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
1230*4882a593Smuzhiyun return -EINVAL;
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
1234*4882a593Smuzhiyun max_size = MAX_INLINE_XATTR_SIZE;
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
1237*4882a593Smuzhiyun F2FS_OPTION(sbi).inline_xattr_size > max_size) {
1238*4882a593Smuzhiyun f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
1239*4882a593Smuzhiyun min_size, max_size);
1240*4882a593Smuzhiyun return -EINVAL;
1241*4882a593Smuzhiyun }
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
1245*4882a593Smuzhiyun f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
1246*4882a593Smuzhiyun return -EINVAL;
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun /* Not pass down write hints if the number of active logs is lesser
1250*4882a593Smuzhiyun * than NR_CURSEG_PERSIST_TYPE.
1251*4882a593Smuzhiyun */
1252*4882a593Smuzhiyun if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
1253*4882a593Smuzhiyun F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
1256*4882a593Smuzhiyun f2fs_err(sbi, "Allow to mount readonly mode only");
1257*4882a593Smuzhiyun return -EROFS;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun return 0;
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun
f2fs_alloc_inode(struct super_block * sb)1262*4882a593Smuzhiyun static struct inode *f2fs_alloc_inode(struct super_block *sb)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun struct f2fs_inode_info *fi;
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
1267*4882a593Smuzhiyun if (!fi)
1268*4882a593Smuzhiyun return NULL;
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun init_once((void *) fi);
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun /* Initialize f2fs-specific inode info */
1273*4882a593Smuzhiyun atomic_set(&fi->dirty_pages, 0);
1274*4882a593Smuzhiyun atomic_set(&fi->i_compr_blocks, 0);
1275*4882a593Smuzhiyun init_f2fs_rwsem(&fi->i_sem);
1276*4882a593Smuzhiyun spin_lock_init(&fi->i_size_lock);
1277*4882a593Smuzhiyun INIT_LIST_HEAD(&fi->dirty_list);
1278*4882a593Smuzhiyun INIT_LIST_HEAD(&fi->gdirty_list);
1279*4882a593Smuzhiyun INIT_LIST_HEAD(&fi->inmem_ilist);
1280*4882a593Smuzhiyun INIT_LIST_HEAD(&fi->inmem_pages);
1281*4882a593Smuzhiyun mutex_init(&fi->inmem_lock);
1282*4882a593Smuzhiyun init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
1283*4882a593Smuzhiyun init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
1284*4882a593Smuzhiyun init_f2fs_rwsem(&fi->i_mmap_sem);
1285*4882a593Smuzhiyun init_f2fs_rwsem(&fi->i_xattr_sem);
1286*4882a593Smuzhiyun
1287*4882a593Smuzhiyun /* Will be used by directory only */
1288*4882a593Smuzhiyun fi->i_dir_level = F2FS_SB(sb)->dir_level;
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun return &fi->vfs_inode;
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun
/*
 * Decide whether an inode with zero refcount should be dropped from the
 * inode cache (super_operations->drop_inode).
 *
 * Returns 1 to tell the VFS to evict the inode immediately, 0 to keep it
 * cached.  Called with inode->i_lock held by the VFS.
 */
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			/*
			 * Drop i_lock (taken by the VFS caller) so the
			 * truncate/writeback work below can sleep; it is
			 * re-taken before returning.
			 */
			spin_unlock(&inode->i_lock);

			/* some remained atomic pages should discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			/* flush any merged writes pending for this inode */
			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			/* restore lock state expected by the VFS caller */
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		/* keep the inode cached; eviction would deadlock here */
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		/* let fscrypt request eviction of inodes with stale keys */
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
1354*4882a593Smuzhiyun
/*
 * Mark an inode's metadata dirty and, when @sync is requested, queue it on
 * the per-sb DIRTY_META list so checkpoint can flush it.
 *
 * Returns 1 if the inode was already marked dirty, 0 if this call set the
 * dirty flag.
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int already_dirty = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		/* first dirtier: flag it and account the dirty inode */
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	} else {
		already_dirty = 1;
	}
	/* queue for checkpoint writeback only once per dirtying cycle */
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return already_dirty;
}
1375*4882a593Smuzhiyun
f2fs_inode_synced(struct inode * inode)1376*4882a593Smuzhiyun void f2fs_inode_synced(struct inode *inode)
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun spin_lock(&sbi->inode_lock[DIRTY_META]);
1381*4882a593Smuzhiyun if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1382*4882a593Smuzhiyun spin_unlock(&sbi->inode_lock[DIRTY_META]);
1383*4882a593Smuzhiyun return;
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
1386*4882a593Smuzhiyun list_del_init(&F2FS_I(inode)->gdirty_list);
1387*4882a593Smuzhiyun dec_page_count(sbi, F2FS_DIRTY_IMETA);
1388*4882a593Smuzhiyun }
1389*4882a593Smuzhiyun clear_inode_flag(inode, FI_DIRTY_INODE);
1390*4882a593Smuzhiyun clear_inode_flag(inode, FI_AUTO_RECOVER);
1391*4882a593Smuzhiyun stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
1392*4882a593Smuzhiyun spin_unlock(&sbi->inode_lock[DIRTY_META]);
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * It marks the inode's metadata dirty so that it is later written back
 * through ->write_inode().
 */
f2fs_dirty_inode(struct inode * inode,int flags)1400*4882a593Smuzhiyun static void f2fs_dirty_inode(struct inode *inode, int flags)
1401*4882a593Smuzhiyun {
1402*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1405*4882a593Smuzhiyun inode->i_ino == F2FS_META_INO(sbi))
1406*4882a593Smuzhiyun return;
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun if (flags == I_DIRTY_TIME)
1409*4882a593Smuzhiyun return;
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
1412*4882a593Smuzhiyun clear_inode_flag(inode, FI_AUTO_RECOVER);
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun f2fs_inode_dirtied(inode, false);
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun
/*
 * Release the in-memory f2fs inode: drop fscrypt state attached to it,
 * then return the containing f2fs_inode_info to the inode slab cache.
 */
static void f2fs_free_inode(struct inode *inode)
{
	/* must run before the memory backing the inode is recycled */
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
1422*4882a593Smuzhiyun
/* Tear down the per-cpu counters created for this superblock. */
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}
1428*4882a593Smuzhiyun
destroy_device_list(struct f2fs_sb_info * sbi)1429*4882a593Smuzhiyun static void destroy_device_list(struct f2fs_sb_info *sbi)
1430*4882a593Smuzhiyun {
1431*4882a593Smuzhiyun int i;
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun for (i = 0; i < sbi->s_ndevs; i++) {
1434*4882a593Smuzhiyun blkdev_put(FDEV(i).bdev, FMODE_EXCL);
1435*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_ZONED
1436*4882a593Smuzhiyun kvfree(FDEV(i).blkz_seq);
1437*4882a593Smuzhiyun kfree(FDEV(i).zone_capacity_blocks);
1438*4882a593Smuzhiyun #endif
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun kvfree(sbi->devs);
1441*4882a593Smuzhiyun }
1442*4882a593Smuzhiyun
/*
 * ->put_super: tear down a mounted f2fs instance.  The ordering below
 * is deliberate: external access points (sysfs/proc, quota, checkpoint
 * thread) are shut down first, a final checkpoint is written if needed,
 * all in-flight IO is drained, and only then are the internal managers,
 * caches and the sbi itself destroyed.
 */
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * flush all issued checkpoints and stop checkpoint issue thread.
	 * after then, all checkpoints should be done by each process context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	/*
	 * If the device discards and nothing was dropped or left pending,
	 * write another checkpoint tagged CP_TRIMMED to record that state.
	 */
	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* every fsync'ed node should have been flushed by now */
	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}
1547*4882a593Smuzhiyun
f2fs_sync_fs(struct super_block * sb,int sync)1548*4882a593Smuzhiyun int f2fs_sync_fs(struct super_block *sb, int sync)
1549*4882a593Smuzhiyun {
1550*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
1551*4882a593Smuzhiyun int err = 0;
1552*4882a593Smuzhiyun
1553*4882a593Smuzhiyun if (unlikely(f2fs_cp_error(sbi)))
1554*4882a593Smuzhiyun return 0;
1555*4882a593Smuzhiyun if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
1556*4882a593Smuzhiyun return 0;
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun trace_f2fs_sync_fs(sb, sync);
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1561*4882a593Smuzhiyun return -EAGAIN;
1562*4882a593Smuzhiyun
1563*4882a593Smuzhiyun if (sync) {
1564*4882a593Smuzhiyun err = f2fs_issue_checkpoint(sbi);
1565*4882a593Smuzhiyun atomic_set(&sbi->no_cp_fsync_pages, 0);
1566*4882a593Smuzhiyun }
1567*4882a593Smuzhiyun
1568*4882a593Smuzhiyun return err;
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun
f2fs_freeze(struct super_block * sb)1571*4882a593Smuzhiyun static int f2fs_freeze(struct super_block *sb)
1572*4882a593Smuzhiyun {
1573*4882a593Smuzhiyun if (f2fs_readonly(sb))
1574*4882a593Smuzhiyun return 0;
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun /* IO error happened before */
1577*4882a593Smuzhiyun if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
1578*4882a593Smuzhiyun return -EIO;
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun /* must be clean, since sync_filesystem() was already called */
1581*4882a593Smuzhiyun if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
1582*4882a593Smuzhiyun return -EINVAL;
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun /* Let's flush checkpoints and stop the thread. */
1585*4882a593Smuzhiyun f2fs_flush_ckpt_thread(F2FS_SB(sb));
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
1588*4882a593Smuzhiyun set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
1589*4882a593Smuzhiyun return 0;
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun
/* ->unfreeze_fs: undo f2fs_freeze() by clearing the freezing flag. */
static int f2fs_unfreeze(struct super_block *sb)
{
	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
	return 0;
}
1597*4882a593Smuzhiyun
1598*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
/*
 * Clamp the statfs numbers in @buf to the project quota limits of
 * @projid, so a directory tree under project quota reports the quota's
 * capacity instead of the whole filesystem's.
 */
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	/* effective block limit: the smaller non-zero of soft/hard */
	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		/* quota limits are in bytes; convert to fs blocks */
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		/* include reserved space so "used" matches quota charging */
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	/* effective inode limit, same soft/hard rule */
	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
1641*4882a593Smuzhiyun #endif
1642*4882a593Smuzhiyun
/*
 * ->statfs: fill @buf with capacity/usage figures for the filesystem
 * containing @dentry.  Reserved blocks (root reserve, checkpoint-
 * disable unusable blocks) are subtracted, and project quota limits are
 * applied when the inode inherits one.
 */
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	/* f_blocks should not include overhead of filesystem */
	buf->f_blocks = user_block_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	/* blocks made unusable by checkpoint=disable don't count as free */
	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	/* f_bavail excludes the root-reserved pool */
	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	/* inode count is bounded by both node slots and available blocks */
	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
1697*4882a593Smuzhiyun
/* Append the quota-related mount options of @sb to @seq. */
static inline void f2fs_show_quota_options(struct seq_file *seq,
							struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int fmt = F2FS_OPTION(sbi).s_jquota_fmt;

	/* journalled quota format, when one is configured */
	if (fmt) {
		char *fmtname = "";

		if (fmt == QFMT_VFS_OLD)
			fmtname = "vfsold";
		else if (fmt == QFMT_VFS_V0)
			fmtname = "vfsv0";
		else if (fmt == QFMT_VFS_V1)
			fmtname = "vfsv1";
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	/* quota file names are escaped by seq_show_option() */
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun #ifdef CONFIG_F2FS_FS_COMPRESSION
f2fs_show_compress_options(struct seq_file * seq,struct super_block * sb)1736*4882a593Smuzhiyun static inline void f2fs_show_compress_options(struct seq_file *seq,
1737*4882a593Smuzhiyun struct super_block *sb)
1738*4882a593Smuzhiyun {
1739*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
1740*4882a593Smuzhiyun char *algtype = "";
1741*4882a593Smuzhiyun int i;
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun if (!f2fs_sb_has_compression(sbi))
1744*4882a593Smuzhiyun return;
1745*4882a593Smuzhiyun
1746*4882a593Smuzhiyun switch (F2FS_OPTION(sbi).compress_algorithm) {
1747*4882a593Smuzhiyun case COMPRESS_LZO:
1748*4882a593Smuzhiyun algtype = "lzo";
1749*4882a593Smuzhiyun break;
1750*4882a593Smuzhiyun case COMPRESS_LZ4:
1751*4882a593Smuzhiyun algtype = "lz4";
1752*4882a593Smuzhiyun break;
1753*4882a593Smuzhiyun case COMPRESS_ZSTD:
1754*4882a593Smuzhiyun algtype = "zstd";
1755*4882a593Smuzhiyun break;
1756*4882a593Smuzhiyun case COMPRESS_LZORLE:
1757*4882a593Smuzhiyun algtype = "lzo-rle";
1758*4882a593Smuzhiyun break;
1759*4882a593Smuzhiyun }
1760*4882a593Smuzhiyun seq_printf(seq, ",compress_algorithm=%s", algtype);
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun if (F2FS_OPTION(sbi).compress_level)
1763*4882a593Smuzhiyun seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
1764*4882a593Smuzhiyun
1765*4882a593Smuzhiyun seq_printf(seq, ",compress_log_size=%u",
1766*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_log_size);
1767*4882a593Smuzhiyun
1768*4882a593Smuzhiyun for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
1769*4882a593Smuzhiyun seq_printf(seq, ",compress_extension=%s",
1770*4882a593Smuzhiyun F2FS_OPTION(sbi).extensions[i]);
1771*4882a593Smuzhiyun }
1772*4882a593Smuzhiyun
1773*4882a593Smuzhiyun if (F2FS_OPTION(sbi).compress_chksum)
1774*4882a593Smuzhiyun seq_puts(seq, ",compress_chksum");
1775*4882a593Smuzhiyun
1776*4882a593Smuzhiyun if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
1777*4882a593Smuzhiyun seq_printf(seq, ",compress_mode=%s", "fs");
1778*4882a593Smuzhiyun else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
1779*4882a593Smuzhiyun seq_printf(seq, ",compress_mode=%s", "user");
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun if (test_opt(sbi, COMPRESS_CACHE))
1782*4882a593Smuzhiyun seq_puts(seq, ",compress_cache");
1783*4882a593Smuzhiyun }
1784*4882a593Smuzhiyun #endif
1785*4882a593Smuzhiyun
/*
 * ->show_options: emit the effective mount options into @seq for
 * /proc/mounts.  Options with a "no" form always print one of the pair
 * so the active state is visible; quota and compression sub-options are
 * delegated to their helpers.
 */
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, GC_MERGE))
		seq_puts(seq, ",gc_merge");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	else
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	/* flush_merge is meaningless (and not shown) on a read-only mount */
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, READ_EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, AGE_EXTENT_CACHE))
		seq_puts(seq, ",age_extent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (test_opt(sbi, MERGE_CHECKPOINT))
		seq_puts(seq, ",checkpoint_merge");
	else
		seq_puts(seq, ",nocheckpoint_merge");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);
#endif

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");

	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
		seq_printf(seq, ",memory=%s", "normal");
	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
		seq_printf(seq, ",memory=%s", "low");

	return 0;
}
1934*4882a593Smuzhiyun
/*
 * Reset the mount options of @sbi to the built-in defaults.  Run before
 * option parsing, so any option not supplied on the command line keeps
 * these values.
 */
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
	F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
	F2FS_OPTION(sbi).compress_ext_cnt = 0;
	F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;

	/* inline encryption is opt-in per mount */
	sbi->sb->s_flags &= ~SB_INLINECRYPT;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, READ_EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	set_opt(sbi, MERGE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	/* zoned block devices are forced to pure log-structured allocation */
	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
	else
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	/* fault injection disabled by default (rate 0, type 0) */
	f2fs_build_fault_attr(sbi, 0, 0);
}
1983*4882a593Smuzhiyun
1984*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
1985*4882a593Smuzhiyun static int f2fs_enable_quotas(struct super_block *sb);
1986*4882a593Smuzhiyun #endif
1987*4882a593Smuzhiyun
/*
 * Enter checkpoint=disable mode: garbage-collect until the disable
 * deadline (DISABLE_TIME) or until GC has nothing left to move, sync the
 * filesystem, verify the unusable-block budget, then write one final
 * CP_PAUSE checkpoint and set SBI_CP_DISABLED.
 *
 * Returns 0 on success, -EINVAL on a read-only mount, -EAGAIN when the
 * unusable-block cap would be exceeded, or a GC/sync/checkpoint error.
 */
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	/* GC until the time budget runs out or there is nothing to migrate */
	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		f2fs_down_write(&sbi->gc_lock);
		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
		if (err == -ENODATA) {
			/* no victim left: GC is done */
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	/* refuse if the unusable blocks would exceed the configured cap */
	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

	f2fs_down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	f2fs_up_write(&sbi->gc_lock);
restore_flag:
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}
2044*4882a593Smuzhiyun
/*
 * Re-enable checkpointing after checkpoint=disable: flush all dirty
 * data (with a bounded number of retries), move dirty segments to
 * prefree under gc_lock, and force a checkpoint so on-disk state is
 * consistent again.
 */
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	int retry = DEFAULT_RETRY_IO_COUNT;

	/* we should flush all the data to keep data consistency */
	do {
		sync_inodes_sb(sbi->sb);
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);

	/* Best effort only: warn if dirty data survived all retries. */
	if (unlikely(retry < 0))
		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

	f2fs_down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	f2fs_up_write(&sbi->gc_lock);

	f2fs_sync_fs(sbi->sb, 1);

	/* Let's ensure there's no pending checkpoint anymore */
	f2fs_flush_ckpt_thread(sbi);
}
2071*4882a593Smuzhiyun
/*
 * Handle mount -o remount: re-parse options, validate option changes
 * that are not allowed dynamically, and start/stop the GC, checkpoint
 * and flush-merge threads to match the new RO/RW state and options.
 * On failure, every started/stopped thread is rolled back (labels in
 * reverse order of setup) and the saved options/flags are restored.
 */
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false, need_stop_gc = false;
	bool need_restart_ckpt = false, need_stop_ckpt = false;
	bool need_restart_flush = false, need_stop_flush = false;
	/* Snapshot of option state BEFORE re-parsing, for change checks. */
	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
	bool no_atgc = !test_opt(sbi, ATGC);
	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
	bool checkpoint_changed;
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	/* Deep-copy quota file names; mount_opt copy above only shares
	 * the pointers.  Free partial copies on allocation failure. */
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data, true);
	if (err)
		goto restore_opts;
	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

	/* An on-disk read-only image can never be remounted RW. */
	if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
		err = -EROFS;
		goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable atgc dynamically */
	if (no_atgc == !!test_opt(sbi, ATGC)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch atgc option is not allowed");
		goto restore_opts;
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}
	/* disallow enable/disable age extent_cache dynamically */
	if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
		goto restore_opts;
	}

	/* disallow enable/disable io_bits (IO alignment) dynamically */
	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

	/* disallow enable/disable compress_cache dynamically */
	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch compress_cache option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) ||
			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
			!test_opt(sbi, GC_MERGE))) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	/* Going RO or changing whint_mode requires a full sync first. */
	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/* Start/stop the checkpoint-merge thread per new options. */
	if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
			!test_opt(sbi, MERGE_CHECKPOINT)) {
		f2fs_stop_ckpt_thread(sbi);
		need_restart_ckpt = true;
	} else {
		/* Flush if the prevous checkpoint, if exists. */
		f2fs_flush_ckpt_thread(sbi);

		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
			    "Failed to start F2FS issue_checkpoint_thread (%d)",
			    err);
			goto restore_gc;
		}
		need_stop_ckpt = true;
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
		need_restart_flush = true;
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_ckpt;
		need_stop_flush = true;
	}

	/* Apply a checkpoint=disable/enable transition last. */
	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_flush;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
/* Rollback paths: undo thread changes in reverse order of setup. */
restore_flush:
	if (need_restart_flush) {
		if (f2fs_create_flush_cmd_control(sbi))
			f2fs_warn(sbi, "background flush thread has stopped");
	} else if (need_stop_flush) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	}
restore_ckpt:
	if (need_restart_ckpt) {
		if (f2fs_start_ckpt_thread(sbi))
			f2fs_warn(sbi, "background ckpt thread has stopped");
	} else if (need_stop_ckpt) {
		f2fs_stop_ckpt_thread(sbi);
	}
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	/* Put back the saved quota format and file-name copies. */
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
2322*4882a593Smuzhiyun
2323*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
2324*4882a593Smuzhiyun /* Read data from quotafile */
/* Read data from quotafile */
/*
 * Read @len bytes at offset @off from the quota file of @type through
 * the page cache.  Reads are clamped to i_size.  Returns the number of
 * bytes read, 0 when @off is past EOF, or a negative errno (and tags
 * SBI_QUOTA_NEED_REPAIR) on I/O failure.
 */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		/* At most one block per iteration (first may be partial). */
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			/* -ENOMEM is transient: wait and retry forever. */
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		/* Page was truncated/migrated while unlocked: re-read. */
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		/* Only the first block can start mid-block. */
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun /* Write to quotafile */
/* Write to quotafile */
/*
 * Write @len bytes at offset @off to the quota file of @type, one
 * block at a time via the a_ops write_begin/write_end protocol.
 * Returns the number of bytes written; if nothing could be written,
 * returns the error from write_begin instead (a partial write returns
 * the partial length and leaves SBI_QUOTA_NEED_REPAIR set).
 */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		/* At most one block per iteration (first may be partial). */
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, &fsdata);
		if (unlikely(err)) {
			/* -ENOMEM is transient: wait and retry forever. */
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	/* Nothing written at all: report the error itself. */
	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
2434*4882a593Smuzhiyun
f2fs_get_dquots(struct inode * inode)2435*4882a593Smuzhiyun static struct dquot **f2fs_get_dquots(struct inode *inode)
2436*4882a593Smuzhiyun {
2437*4882a593Smuzhiyun return F2FS_I(inode)->i_dquot;
2438*4882a593Smuzhiyun }
2439*4882a593Smuzhiyun
f2fs_get_reserved_space(struct inode * inode)2440*4882a593Smuzhiyun static qsize_t *f2fs_get_reserved_space(struct inode *inode)
2441*4882a593Smuzhiyun {
2442*4882a593Smuzhiyun return &F2FS_I(inode)->i_reserved_quota;
2443*4882a593Smuzhiyun }
2444*4882a593Smuzhiyun
f2fs_quota_on_mount(struct f2fs_sb_info * sbi,int type)2445*4882a593Smuzhiyun static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
2446*4882a593Smuzhiyun {
2447*4882a593Smuzhiyun if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2448*4882a593Smuzhiyun f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2449*4882a593Smuzhiyun return 0;
2450*4882a593Smuzhiyun }
2451*4882a593Smuzhiyun
2452*4882a593Smuzhiyun return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2453*4882a593Smuzhiyun F2FS_OPTION(sbi).s_jquota_fmt, type);
2454*4882a593Smuzhiyun }
2455*4882a593Smuzhiyun
/*
 * Enable quotas during mount/recovery.  With the quota_ino feature
 * (and a read-only remount) the hidden quota inodes are used; else
 * every user-supplied quota file is enabled one by one.  Returns 1 if
 * any quota type was enabled, 0 otherwise; failures are logged only.
 */
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int type;
	int any_enabled = 0;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		int err = f2fs_enable_quotas(sbi->sb);

		if (!err)
			return 1;
		f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
		return 0;
	}

	for (type = 0; type < MAXQUOTAS; type++) {
		int err;

		if (!F2FS_OPTION(sbi).s_qf_names[type])
			continue;

		err = f2fs_quota_on_mount(sbi, type);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, type);
		else
			any_enabled = 1;
	}
	return any_enabled;
}
2483*4882a593Smuzhiyun
/*
 * Enable one quota type backed by a hidden quota inode (quota_ino
 * feature).  Looks up the reserved inode number from the superblock,
 * loads it into the dquot core, and drops the reference again.
 * Returns 0 on success or a negative errno.
 */
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
							unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	/* Caller must only use this path with the quota_ino feature. */
	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	/* dquot core holds its own reference while quota stays enabled. */
	iput(qf_inode);
	return err;
}
2509*4882a593Smuzhiyun
/*
 * Enable usage tracking for every quota type that has a hidden quota
 * inode; limits enforcement is enabled only for types requested via
 * mount options.  On failure, already-enabled types are rolled back
 * and SBI_QUOTA_NEED_REPAIR is set so fsck can fix things up.
 */
static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	/* Per-type "enforce limits" request, indexed USR/GRP/PRJ. */
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	/* Hidden quota inodes: not visible, not user-manageable files. */
	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				/* Roll back the types enabled so far. */
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
2547*4882a593Smuzhiyun
/*
 * Flush one quota type's in-memory dquots and its file's dirty pages.
 * For journalled quota the final wait/truncate is skipped: checkpoint
 * guarantees consistency there.  Any failure tags the filesystem with
 * SBI_QUOTA_NEED_REPAIR.
 */
static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	/* Drop the pagecache so userspace reads see the flushed data. */
	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}
2574*4882a593Smuzhiyun
/*
 * Sync quota information to disk for @type (or all types when -1).
 * Takes lock_op before quota_sem to keep the lock order consistent
 * with the checkpoint path (see the deadlock diagram below).
 * Returns 0 or the first failure.
 */
int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		/* Legacy quota files need i_rwsem; quota_ino does not. */
		if (!f2fs_sb_has_quota_ino(sbi))
			inode_lock(dqopt->files[cnt]);

		/*
		 * do_quotactl
		 *  f2fs_quota_sync
		 *  f2fs_down_read(quota_sem)
		 *  dquot_writeback_dquots()
		 *  f2fs_dquot_commit
		 *			    block_operation
		 *  f2fs_down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		f2fs_down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		f2fs_up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}
2622*4882a593Smuzhiyun
f2fs_quota_on(struct super_block * sb,int type,int format_id,const struct path * path)2623*4882a593Smuzhiyun static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2624*4882a593Smuzhiyun const struct path *path)
2625*4882a593Smuzhiyun {
2626*4882a593Smuzhiyun struct inode *inode;
2627*4882a593Smuzhiyun int err;
2628*4882a593Smuzhiyun
2629*4882a593Smuzhiyun /* if quota sysfile exists, deny enabling quota with specific file */
2630*4882a593Smuzhiyun if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2631*4882a593Smuzhiyun f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2632*4882a593Smuzhiyun return -EBUSY;
2633*4882a593Smuzhiyun }
2634*4882a593Smuzhiyun
2635*4882a593Smuzhiyun err = f2fs_quota_sync(sb, type);
2636*4882a593Smuzhiyun if (err)
2637*4882a593Smuzhiyun return err;
2638*4882a593Smuzhiyun
2639*4882a593Smuzhiyun err = dquot_quota_on(sb, type, format_id, path);
2640*4882a593Smuzhiyun if (err)
2641*4882a593Smuzhiyun return err;
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun inode = d_inode(path->dentry);
2644*4882a593Smuzhiyun
2645*4882a593Smuzhiyun inode_lock(inode);
2646*4882a593Smuzhiyun F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
2647*4882a593Smuzhiyun f2fs_set_inode_flags(inode);
2648*4882a593Smuzhiyun inode_unlock(inode);
2649*4882a593Smuzhiyun f2fs_mark_inode_dirty_sync(inode, false);
2650*4882a593Smuzhiyun
2651*4882a593Smuzhiyun return 0;
2652*4882a593Smuzhiyun }
2653*4882a593Smuzhiyun
/*
 * Turn off one quota type: sync it, disable it in the dquot core, and
 * (for file-based quota only) clear the noatime/immutable protection
 * set by f2fs_quota_on().  Pins the quota inode with igrab() so it
 * cannot vanish while we work on it.
 */
static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	/* No quota file (or inode going away): just let dquot clean up. */
	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	/* quota_ino inodes carry no user-visible flags to restore. */
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
2679*4882a593Smuzhiyun
f2fs_quota_off(struct super_block * sb,int type)2680*4882a593Smuzhiyun static int f2fs_quota_off(struct super_block *sb, int type)
2681*4882a593Smuzhiyun {
2682*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
2683*4882a593Smuzhiyun int err;
2684*4882a593Smuzhiyun
2685*4882a593Smuzhiyun err = __f2fs_quota_off(sb, type);
2686*4882a593Smuzhiyun
2687*4882a593Smuzhiyun /*
2688*4882a593Smuzhiyun * quotactl can shutdown journalled quota, result in inconsistence
2689*4882a593Smuzhiyun * between quota record and fs data by following updates, tag the
2690*4882a593Smuzhiyun * flag to let fsck be aware of it.
2691*4882a593Smuzhiyun */
2692*4882a593Smuzhiyun if (is_journalled_quota(sbi))
2693*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2694*4882a593Smuzhiyun return err;
2695*4882a593Smuzhiyun }
2696*4882a593Smuzhiyun
f2fs_quota_off_umount(struct super_block * sb)2697*4882a593Smuzhiyun void f2fs_quota_off_umount(struct super_block *sb)
2698*4882a593Smuzhiyun {
2699*4882a593Smuzhiyun int type;
2700*4882a593Smuzhiyun int err;
2701*4882a593Smuzhiyun
2702*4882a593Smuzhiyun for (type = 0; type < MAXQUOTAS; type++) {
2703*4882a593Smuzhiyun err = __f2fs_quota_off(sb, type);
2704*4882a593Smuzhiyun if (err) {
2705*4882a593Smuzhiyun int ret = dquot_quota_off(sb, type);
2706*4882a593Smuzhiyun
2707*4882a593Smuzhiyun f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2708*4882a593Smuzhiyun type, err, ret);
2709*4882a593Smuzhiyun set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2710*4882a593Smuzhiyun }
2711*4882a593Smuzhiyun }
2712*4882a593Smuzhiyun /*
2713*4882a593Smuzhiyun * In case of checkpoint=disable, we must flush quota blocks.
2714*4882a593Smuzhiyun * This can cause NULL exception for node_inode in end_io, since
2715*4882a593Smuzhiyun * put_super already dropped it.
2716*4882a593Smuzhiyun */
2717*4882a593Smuzhiyun sync_filesystem(sb);
2718*4882a593Smuzhiyun }
2719*4882a593Smuzhiyun
f2fs_truncate_quota_inode_pages(struct super_block * sb)2720*4882a593Smuzhiyun static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2721*4882a593Smuzhiyun {
2722*4882a593Smuzhiyun struct quota_info *dqopt = sb_dqopt(sb);
2723*4882a593Smuzhiyun int type;
2724*4882a593Smuzhiyun
2725*4882a593Smuzhiyun for (type = 0; type < MAXQUOTAS; type++) {
2726*4882a593Smuzhiyun if (!dqopt->files[type])
2727*4882a593Smuzhiyun continue;
2728*4882a593Smuzhiyun f2fs_inode_synced(dqopt->files[type]);
2729*4882a593Smuzhiyun }
2730*4882a593Smuzhiyun }
2731*4882a593Smuzhiyun
/*
 * ->write_dquot: commit an in-memory dquot to the quota file.
 * Any write failure marks the superblock for fsck quota repair.
 */
static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	/*
	 * NOTE(review): SINGLE_DEPTH_NESTING suggests quota_sem may already
	 * be read-held higher in this call chain — confirm against the
	 * quota-sync path before changing the lock class.
	 */
	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}
2744*4882a593Smuzhiyun
/*
 * ->acquire_dquot: read a dquot in from the quota file under quota_sem.
 * A read/allocation failure marks the superblock for fsck quota repair.
 */
static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}
2757*4882a593Smuzhiyun
f2fs_dquot_release(struct dquot * dquot)2758*4882a593Smuzhiyun static int f2fs_dquot_release(struct dquot *dquot)
2759*4882a593Smuzhiyun {
2760*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2761*4882a593Smuzhiyun int ret = dquot_release(dquot);
2762*4882a593Smuzhiyun
2763*4882a593Smuzhiyun if (ret < 0)
2764*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2765*4882a593Smuzhiyun return ret;
2766*4882a593Smuzhiyun }
2767*4882a593Smuzhiyun
f2fs_dquot_mark_dquot_dirty(struct dquot * dquot)2768*4882a593Smuzhiyun static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2769*4882a593Smuzhiyun {
2770*4882a593Smuzhiyun struct super_block *sb = dquot->dq_sb;
2771*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
2772*4882a593Smuzhiyun int ret = dquot_mark_dquot_dirty(dquot);
2773*4882a593Smuzhiyun
2774*4882a593Smuzhiyun /* if we are using journalled quota */
2775*4882a593Smuzhiyun if (is_journalled_quota(sbi))
2776*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2777*4882a593Smuzhiyun
2778*4882a593Smuzhiyun return ret;
2779*4882a593Smuzhiyun }
2780*4882a593Smuzhiyun
f2fs_dquot_commit_info(struct super_block * sb,int type)2781*4882a593Smuzhiyun static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2782*4882a593Smuzhiyun {
2783*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
2784*4882a593Smuzhiyun int ret = dquot_commit_info(sb, type);
2785*4882a593Smuzhiyun
2786*4882a593Smuzhiyun if (ret < 0)
2787*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2788*4882a593Smuzhiyun return ret;
2789*4882a593Smuzhiyun }
2790*4882a593Smuzhiyun
/* ->get_projid: report the inode's project id; always succeeds. */
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	*projid = fi->i_projid;
	return 0;
}
2796*4882a593Smuzhiyun
2797*4882a593Smuzhiyun static const struct dquot_operations f2fs_quota_operations = {
2798*4882a593Smuzhiyun .get_reserved_space = f2fs_get_reserved_space,
2799*4882a593Smuzhiyun .write_dquot = f2fs_dquot_commit,
2800*4882a593Smuzhiyun .acquire_dquot = f2fs_dquot_acquire,
2801*4882a593Smuzhiyun .release_dquot = f2fs_dquot_release,
2802*4882a593Smuzhiyun .mark_dirty = f2fs_dquot_mark_dquot_dirty,
2803*4882a593Smuzhiyun .write_info = f2fs_dquot_commit_info,
2804*4882a593Smuzhiyun .alloc_dquot = dquot_alloc,
2805*4882a593Smuzhiyun .destroy_dquot = dquot_destroy,
2806*4882a593Smuzhiyun .get_projid = f2fs_get_projid,
2807*4882a593Smuzhiyun .get_next_id = dquot_get_next_id,
2808*4882a593Smuzhiyun };
2809*4882a593Smuzhiyun
2810*4882a593Smuzhiyun static const struct quotactl_ops f2fs_quotactl_ops = {
2811*4882a593Smuzhiyun .quota_on = f2fs_quota_on,
2812*4882a593Smuzhiyun .quota_off = f2fs_quota_off,
2813*4882a593Smuzhiyun .quota_sync = f2fs_quota_sync,
2814*4882a593Smuzhiyun .get_state = dquot_get_state,
2815*4882a593Smuzhiyun .set_info = dquot_set_dqinfo,
2816*4882a593Smuzhiyun .get_dqblk = dquot_get_dqblk,
2817*4882a593Smuzhiyun .set_dqblk = dquot_set_dqblk,
2818*4882a593Smuzhiyun .get_nextdqblk = dquot_get_next_dqblk,
2819*4882a593Smuzhiyun };
2820*4882a593Smuzhiyun #else
/* CONFIG_QUOTA=n stub: quota sync is a no-op. */
int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}
2825*4882a593Smuzhiyun
/* CONFIG_QUOTA=n stub: nothing to turn off at unmount. */
void f2fs_quota_off_umount(struct super_block *sb)
{
}
2829*4882a593Smuzhiyun #endif
2830*4882a593Smuzhiyun
2831*4882a593Smuzhiyun static const struct super_operations f2fs_sops = {
2832*4882a593Smuzhiyun .alloc_inode = f2fs_alloc_inode,
2833*4882a593Smuzhiyun .free_inode = f2fs_free_inode,
2834*4882a593Smuzhiyun .drop_inode = f2fs_drop_inode,
2835*4882a593Smuzhiyun .write_inode = f2fs_write_inode,
2836*4882a593Smuzhiyun .dirty_inode = f2fs_dirty_inode,
2837*4882a593Smuzhiyun .show_options = f2fs_show_options,
2838*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
2839*4882a593Smuzhiyun .quota_read = f2fs_quota_read,
2840*4882a593Smuzhiyun .quota_write = f2fs_quota_write,
2841*4882a593Smuzhiyun .get_dquots = f2fs_get_dquots,
2842*4882a593Smuzhiyun #endif
2843*4882a593Smuzhiyun .evict_inode = f2fs_evict_inode,
2844*4882a593Smuzhiyun .put_super = f2fs_put_super,
2845*4882a593Smuzhiyun .sync_fs = f2fs_sync_fs,
2846*4882a593Smuzhiyun .freeze_fs = f2fs_freeze,
2847*4882a593Smuzhiyun .unfreeze_fs = f2fs_unfreeze,
2848*4882a593Smuzhiyun .statfs = f2fs_statfs,
2849*4882a593Smuzhiyun .remount_fs = f2fs_remount,
2850*4882a593Smuzhiyun };
2851*4882a593Smuzhiyun
2852*4882a593Smuzhiyun #ifdef CONFIG_FS_ENCRYPTION
/*
 * fscrypt ->get_context: the encryption context is stored as an f2fs
 * xattr on the inode.  Returns the context size or a negative errno.
 */
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
			     F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
			     ctx, len, NULL);
}
2859*4882a593Smuzhiyun
/*
 * fscrypt ->set_context: persist an encryption context as an f2fs xattr.
 * XATTR_CREATE ensures an existing context is never silently replaced.
 */
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
			    void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 *
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
	    inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
			     F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
			     ctx, len, fs_data, XATTR_CREATE);
}
2879*4882a593Smuzhiyun
/* fscrypt ->get_dummy_policy: test_dummy_encryption mount-option policy. */
static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
{
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
}
2884*4882a593Smuzhiyun
/* fscrypt ->has_stable_inodes: f2fs inode numbers are never reused/renumbered. */
static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}
2889*4882a593Smuzhiyun
/*
 * fscrypt ->get_ino_and_lblk_bits: report the bit widths of inode numbers
 * (nid_t) and logical block numbers (block_t) for IV generation.
 */
static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(nid_t);
	*lblk_bits_ret = 8 * sizeof(block_t);
}
2896*4882a593Smuzhiyun
f2fs_get_num_devices(struct super_block * sb)2897*4882a593Smuzhiyun static int f2fs_get_num_devices(struct super_block *sb)
2898*4882a593Smuzhiyun {
2899*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
2900*4882a593Smuzhiyun
2901*4882a593Smuzhiyun if (f2fs_is_multi_device(sbi))
2902*4882a593Smuzhiyun return sbi->s_ndevs;
2903*4882a593Smuzhiyun return 1;
2904*4882a593Smuzhiyun }
2905*4882a593Smuzhiyun
f2fs_get_devices(struct super_block * sb,struct request_queue ** devs)2906*4882a593Smuzhiyun static void f2fs_get_devices(struct super_block *sb,
2907*4882a593Smuzhiyun struct request_queue **devs)
2908*4882a593Smuzhiyun {
2909*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_SB(sb);
2910*4882a593Smuzhiyun int i;
2911*4882a593Smuzhiyun
2912*4882a593Smuzhiyun for (i = 0; i < sbi->s_ndevs; i++)
2913*4882a593Smuzhiyun devs[i] = bdev_get_queue(FDEV(i).bdev);
2914*4882a593Smuzhiyun }
2915*4882a593Smuzhiyun
2916*4882a593Smuzhiyun static const struct fscrypt_operations f2fs_cryptops = {
2917*4882a593Smuzhiyun .key_prefix = "f2fs:",
2918*4882a593Smuzhiyun .get_context = f2fs_get_context,
2919*4882a593Smuzhiyun .set_context = f2fs_set_context,
2920*4882a593Smuzhiyun .get_dummy_policy = f2fs_get_dummy_policy,
2921*4882a593Smuzhiyun .empty_dir = f2fs_empty_dir,
2922*4882a593Smuzhiyun .max_namelen = F2FS_NAME_LEN,
2923*4882a593Smuzhiyun .has_stable_inodes = f2fs_has_stable_inodes,
2924*4882a593Smuzhiyun .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
2925*4882a593Smuzhiyun .get_num_devices = f2fs_get_num_devices,
2926*4882a593Smuzhiyun .get_devices = f2fs_get_devices,
2927*4882a593Smuzhiyun };
2928*4882a593Smuzhiyun #endif
2929*4882a593Smuzhiyun
/*
 * Look up an inode for NFS file-handle decoding.  Rejects out-of-range
 * nids and, when the handle carries a generation number, inodes whose
 * generation no longer matches (the file was deleted and nid reused).
 */
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
2954*4882a593Smuzhiyun
/* Export op: decode an NFS file handle into the target dentry. */
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
2961*4882a593Smuzhiyun
/* Export op: decode an NFS file handle into the parent dentry. */
static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
2968*4882a593Smuzhiyun
2969*4882a593Smuzhiyun static const struct export_operations f2fs_export_ops = {
2970*4882a593Smuzhiyun .fh_to_dentry = f2fs_fh_to_dentry,
2971*4882a593Smuzhiyun .fh_to_parent = f2fs_fh_to_parent,
2972*4882a593Smuzhiyun .get_parent = f2fs_get_parent,
2973*4882a593Smuzhiyun };
2974*4882a593Smuzhiyun
max_file_blocks(struct inode * inode)2975*4882a593Smuzhiyun loff_t max_file_blocks(struct inode *inode)
2976*4882a593Smuzhiyun {
2977*4882a593Smuzhiyun loff_t result = 0;
2978*4882a593Smuzhiyun loff_t leaf_count;
2979*4882a593Smuzhiyun
2980*4882a593Smuzhiyun /*
2981*4882a593Smuzhiyun * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
2982*4882a593Smuzhiyun * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
2983*4882a593Smuzhiyun * space in inode.i_addr, it will be more safe to reassign
2984*4882a593Smuzhiyun * result as zero.
2985*4882a593Smuzhiyun */
2986*4882a593Smuzhiyun
2987*4882a593Smuzhiyun if (inode && f2fs_compressed_file(inode))
2988*4882a593Smuzhiyun leaf_count = ADDRS_PER_BLOCK(inode);
2989*4882a593Smuzhiyun else
2990*4882a593Smuzhiyun leaf_count = DEF_ADDRS_PER_BLOCK;
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun /* two direct node blocks */
2993*4882a593Smuzhiyun result += (leaf_count * 2);
2994*4882a593Smuzhiyun
2995*4882a593Smuzhiyun /* two indirect node blocks */
2996*4882a593Smuzhiyun leaf_count *= NIDS_PER_BLOCK;
2997*4882a593Smuzhiyun result += (leaf_count * 2);
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun /* one double indirect node block */
3000*4882a593Smuzhiyun leaf_count *= NIDS_PER_BLOCK;
3001*4882a593Smuzhiyun result += leaf_count;
3002*4882a593Smuzhiyun
3003*4882a593Smuzhiyun return result;
3004*4882a593Smuzhiyun }
3005*4882a593Smuzhiyun
/*
 * Write the superblock buffer to disk synchronously with FUA.
 * When @super is non-NULL, copy it into the buffer first; with a NULL
 * @super the buffer's current (already patched) contents are written.
 */
static int __f2fs_commit_super(struct buffer_head *bh,
			       struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
3018*4882a593Smuzhiyun
/*
 * Validate that the CP/SIT/NAT/SSA/MAIN areas recorded in the raw
 * superblock are contiguous and within the device.  Returns true when an
 * unfixable inconsistency is found (mount should fail), false otherwise.
 *
 * If MAIN ends before the recorded segment_count, the function shrinks
 * segment_count in memory and, when the device is writable, commits the
 * fixed superblock back to disk.
 */
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					      struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	/* segment 0 must start exactly at the checkpoint area */
	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	/* each area must end exactly where the next one begins */
	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		/* MAIN extends past the recorded device size: unfixable */
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			/* read-only: remember to write the SB back later */
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
3109*4882a593Smuzhiyun
/*
 * Validate an on-disk superblock image before mounting: magic, optional
 * CRC, block/sector geometry, segment/section/zone accounting, device
 * table consistency, extension counts, cp_payload and reserved inos.
 * Finally delegates area-layout checks to sanity_check_area_boundary().
 *
 * Returns 0 if the superblock looks sane, -EINVAL on a magic mismatch
 * (try the backup superblock), or -EFSCORRUPTED on corruption.
 */
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				  struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* Currently, support only 4KB block size */
	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
			  le32_to_cpu(raw_super->log_blocksize),
			  F2FS_BLKSIZE_BITS);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	/* sectors per block and sector size must multiply to the block size */
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			  segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	/* block_count >> 9: a segment is 512 blocks per the earlier check */
	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		/* multi-device: segment total must match the device table */
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
				  segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	/* cp_payload must leave room for CP packs and persisted cursegs */
	if (le32_to_cpu(raw_super->cp_payload) >=
				(blocks_per_seg - F2FS_CP_PACKS -
				NR_CURSEG_PERSIST_TYPE)) {
		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS -
			  NR_CURSEG_PERSIST_TYPE);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return -EFSCORRUPTED;

	return 0;
}
3279*4882a593Smuzhiyun
f2fs_sanity_check_ckpt(struct f2fs_sb_info * sbi)3280*4882a593Smuzhiyun int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
3281*4882a593Smuzhiyun {
3282*4882a593Smuzhiyun unsigned int total, fsmeta;
3283*4882a593Smuzhiyun struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3284*4882a593Smuzhiyun struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3285*4882a593Smuzhiyun unsigned int ovp_segments, reserved_segments;
3286*4882a593Smuzhiyun unsigned int main_segs, blocks_per_seg;
3287*4882a593Smuzhiyun unsigned int sit_segs, nat_segs;
3288*4882a593Smuzhiyun unsigned int sit_bitmap_size, nat_bitmap_size;
3289*4882a593Smuzhiyun unsigned int log_blocks_per_seg;
3290*4882a593Smuzhiyun unsigned int segment_count_main;
3291*4882a593Smuzhiyun unsigned int cp_pack_start_sum, cp_payload;
3292*4882a593Smuzhiyun block_t user_block_count, valid_user_blocks;
3293*4882a593Smuzhiyun block_t avail_node_count, valid_node_count;
3294*4882a593Smuzhiyun unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
3295*4882a593Smuzhiyun int i, j;
3296*4882a593Smuzhiyun
3297*4882a593Smuzhiyun total = le32_to_cpu(raw_super->segment_count);
3298*4882a593Smuzhiyun fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
3299*4882a593Smuzhiyun sit_segs = le32_to_cpu(raw_super->segment_count_sit);
3300*4882a593Smuzhiyun fsmeta += sit_segs;
3301*4882a593Smuzhiyun nat_segs = le32_to_cpu(raw_super->segment_count_nat);
3302*4882a593Smuzhiyun fsmeta += nat_segs;
3303*4882a593Smuzhiyun fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
3304*4882a593Smuzhiyun fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3305*4882a593Smuzhiyun
3306*4882a593Smuzhiyun if (unlikely(fsmeta >= total))
3307*4882a593Smuzhiyun return 1;
3308*4882a593Smuzhiyun
3309*4882a593Smuzhiyun ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3310*4882a593Smuzhiyun reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3311*4882a593Smuzhiyun
3312*4882a593Smuzhiyun if (!f2fs_sb_has_readonly(sbi) &&
3313*4882a593Smuzhiyun unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3314*4882a593Smuzhiyun ovp_segments == 0 || reserved_segments == 0)) {
3315*4882a593Smuzhiyun f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3316*4882a593Smuzhiyun return 1;
3317*4882a593Smuzhiyun }
3318*4882a593Smuzhiyun user_block_count = le64_to_cpu(ckpt->user_block_count);
3319*4882a593Smuzhiyun segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
3320*4882a593Smuzhiyun (f2fs_sb_has_readonly(sbi) ? 1 : 0);
3321*4882a593Smuzhiyun log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3322*4882a593Smuzhiyun if (!user_block_count || user_block_count >=
3323*4882a593Smuzhiyun segment_count_main << log_blocks_per_seg) {
3324*4882a593Smuzhiyun f2fs_err(sbi, "Wrong user_block_count: %u",
3325*4882a593Smuzhiyun user_block_count);
3326*4882a593Smuzhiyun return 1;
3327*4882a593Smuzhiyun }
3328*4882a593Smuzhiyun
3329*4882a593Smuzhiyun valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3330*4882a593Smuzhiyun if (valid_user_blocks > user_block_count) {
3331*4882a593Smuzhiyun f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3332*4882a593Smuzhiyun valid_user_blocks, user_block_count);
3333*4882a593Smuzhiyun return 1;
3334*4882a593Smuzhiyun }
3335*4882a593Smuzhiyun
3336*4882a593Smuzhiyun valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3337*4882a593Smuzhiyun avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3338*4882a593Smuzhiyun if (valid_node_count > avail_node_count) {
3339*4882a593Smuzhiyun f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3340*4882a593Smuzhiyun valid_node_count, avail_node_count);
3341*4882a593Smuzhiyun return 1;
3342*4882a593Smuzhiyun }
3343*4882a593Smuzhiyun
3344*4882a593Smuzhiyun main_segs = le32_to_cpu(raw_super->segment_count_main);
3345*4882a593Smuzhiyun blocks_per_seg = sbi->blocks_per_seg;
3346*4882a593Smuzhiyun
3347*4882a593Smuzhiyun for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3348*4882a593Smuzhiyun if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3349*4882a593Smuzhiyun le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3350*4882a593Smuzhiyun return 1;
3351*4882a593Smuzhiyun
3352*4882a593Smuzhiyun if (f2fs_sb_has_readonly(sbi))
3353*4882a593Smuzhiyun goto check_data;
3354*4882a593Smuzhiyun
3355*4882a593Smuzhiyun for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3356*4882a593Smuzhiyun if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3357*4882a593Smuzhiyun le32_to_cpu(ckpt->cur_node_segno[j])) {
3358*4882a593Smuzhiyun f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3359*4882a593Smuzhiyun i, j,
3360*4882a593Smuzhiyun le32_to_cpu(ckpt->cur_node_segno[i]));
3361*4882a593Smuzhiyun return 1;
3362*4882a593Smuzhiyun }
3363*4882a593Smuzhiyun }
3364*4882a593Smuzhiyun }
3365*4882a593Smuzhiyun check_data:
3366*4882a593Smuzhiyun for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3367*4882a593Smuzhiyun if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3368*4882a593Smuzhiyun le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3369*4882a593Smuzhiyun return 1;
3370*4882a593Smuzhiyun
3371*4882a593Smuzhiyun if (f2fs_sb_has_readonly(sbi))
3372*4882a593Smuzhiyun goto skip_cross;
3373*4882a593Smuzhiyun
3374*4882a593Smuzhiyun for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3375*4882a593Smuzhiyun if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3376*4882a593Smuzhiyun le32_to_cpu(ckpt->cur_data_segno[j])) {
3377*4882a593Smuzhiyun f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3378*4882a593Smuzhiyun i, j,
3379*4882a593Smuzhiyun le32_to_cpu(ckpt->cur_data_segno[i]));
3380*4882a593Smuzhiyun return 1;
3381*4882a593Smuzhiyun }
3382*4882a593Smuzhiyun }
3383*4882a593Smuzhiyun }
3384*4882a593Smuzhiyun for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3385*4882a593Smuzhiyun for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3386*4882a593Smuzhiyun if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3387*4882a593Smuzhiyun le32_to_cpu(ckpt->cur_data_segno[j])) {
3388*4882a593Smuzhiyun f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3389*4882a593Smuzhiyun i, j,
3390*4882a593Smuzhiyun le32_to_cpu(ckpt->cur_node_segno[i]));
3391*4882a593Smuzhiyun return 1;
3392*4882a593Smuzhiyun }
3393*4882a593Smuzhiyun }
3394*4882a593Smuzhiyun }
3395*4882a593Smuzhiyun skip_cross:
3396*4882a593Smuzhiyun sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3397*4882a593Smuzhiyun nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3398*4882a593Smuzhiyun
3399*4882a593Smuzhiyun if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3400*4882a593Smuzhiyun nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3401*4882a593Smuzhiyun f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
3402*4882a593Smuzhiyun sit_bitmap_size, nat_bitmap_size);
3403*4882a593Smuzhiyun return 1;
3404*4882a593Smuzhiyun }
3405*4882a593Smuzhiyun
3406*4882a593Smuzhiyun cp_pack_start_sum = __start_sum_addr(sbi);
3407*4882a593Smuzhiyun cp_payload = __cp_payload(sbi);
3408*4882a593Smuzhiyun if (cp_pack_start_sum < cp_payload + 1 ||
3409*4882a593Smuzhiyun cp_pack_start_sum > blocks_per_seg - 1 -
3410*4882a593Smuzhiyun NR_CURSEG_PERSIST_TYPE) {
3411*4882a593Smuzhiyun f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3412*4882a593Smuzhiyun cp_pack_start_sum);
3413*4882a593Smuzhiyun return 1;
3414*4882a593Smuzhiyun }
3415*4882a593Smuzhiyun
3416*4882a593Smuzhiyun if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3417*4882a593Smuzhiyun le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3418*4882a593Smuzhiyun f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3419*4882a593Smuzhiyun "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3420*4882a593Smuzhiyun "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3421*4882a593Smuzhiyun le32_to_cpu(ckpt->checksum_offset));
3422*4882a593Smuzhiyun return 1;
3423*4882a593Smuzhiyun }
3424*4882a593Smuzhiyun
3425*4882a593Smuzhiyun nat_blocks = nat_segs << log_blocks_per_seg;
3426*4882a593Smuzhiyun nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
3427*4882a593Smuzhiyun nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3428*4882a593Smuzhiyun if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
3429*4882a593Smuzhiyun (cp_payload + F2FS_CP_PACKS +
3430*4882a593Smuzhiyun NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
3431*4882a593Smuzhiyun f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
3432*4882a593Smuzhiyun cp_payload, nat_bits_blocks);
3433*4882a593Smuzhiyun return 1;
3434*4882a593Smuzhiyun }
3435*4882a593Smuzhiyun
3436*4882a593Smuzhiyun if (unlikely(f2fs_cp_error(sbi))) {
3437*4882a593Smuzhiyun f2fs_err(sbi, "A bug case: need to run fsck");
3438*4882a593Smuzhiyun return 1;
3439*4882a593Smuzhiyun }
3440*4882a593Smuzhiyun return 0;
3441*4882a593Smuzhiyun }
3442*4882a593Smuzhiyun
/*
 * init_sb_info - seed runtime sbi fields from the raw superblock
 *
 * Copies geometry (block/segment/section sizes, reserved inode numbers)
 * out of the on-disk superblock, resets GC/victim bookkeeping, and
 * initializes the counters, timers and locks used during the mount.
 */
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	/* geometry: shift/size fields come straight from the superblock */
	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	/*
	 * Only half of the NAT segments are counted here — presumably the
	 * other half is the shadow copy; matches the /2 used in the
	 * checkpoint sanity checks.
	 */
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	/* reserved well-known inode numbers */
	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
	/* no GC victim selected yet */
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	sbi->dir_level = DEF_DIR_LEVEL;
	/* default checkpoint/idle/discard timing intervals */
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
		DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_f2fs_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_f2fs_rwsem(&sbi->sb_lock);
	init_f2fs_rwsem(&sbi->pin_sem);
}
3496*4882a593Smuzhiyun
init_percpu_info(struct f2fs_sb_info * sbi)3497*4882a593Smuzhiyun static int init_percpu_info(struct f2fs_sb_info *sbi)
3498*4882a593Smuzhiyun {
3499*4882a593Smuzhiyun int err;
3500*4882a593Smuzhiyun
3501*4882a593Smuzhiyun err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3502*4882a593Smuzhiyun if (err)
3503*4882a593Smuzhiyun return err;
3504*4882a593Smuzhiyun
3505*4882a593Smuzhiyun err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3506*4882a593Smuzhiyun GFP_KERNEL);
3507*4882a593Smuzhiyun if (err)
3508*4882a593Smuzhiyun percpu_counter_destroy(&sbi->alloc_valid_block_count);
3509*4882a593Smuzhiyun
3510*4882a593Smuzhiyun return err;
3511*4882a593Smuzhiyun }
3512*4882a593Smuzhiyun
3513*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_ZONED
3514*4882a593Smuzhiyun
/* Context handed to f2fs_report_zone_cb() while scanning a zoned device. */
struct f2fs_report_zones_args {
	struct f2fs_dev_info *dev;	/* device whose zones are being scanned */
	bool zone_cap_mismatch;		/* set if any zone has capacity != len */
};
3519*4882a593Smuzhiyun
f2fs_report_zone_cb(struct blk_zone * zone,unsigned int idx,void * data)3520*4882a593Smuzhiyun static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3521*4882a593Smuzhiyun void *data)
3522*4882a593Smuzhiyun {
3523*4882a593Smuzhiyun struct f2fs_report_zones_args *rz_args = data;
3524*4882a593Smuzhiyun
3525*4882a593Smuzhiyun if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3526*4882a593Smuzhiyun return 0;
3527*4882a593Smuzhiyun
3528*4882a593Smuzhiyun set_bit(idx, rz_args->dev->blkz_seq);
3529*4882a593Smuzhiyun rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
3530*4882a593Smuzhiyun F2FS_LOG_SECTORS_PER_BLOCK;
3531*4882a593Smuzhiyun if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
3532*4882a593Smuzhiyun rz_args->zone_cap_mismatch = true;
3533*4882a593Smuzhiyun
3534*4882a593Smuzhiyun return 0;
3535*4882a593Smuzhiyun }
3536*4882a593Smuzhiyun
/*
 * init_blkz_info - collect zone layout info for one member device
 * @sbi:  f2fs-private super block info
 * @devi: index into the FDEV() device array
 *
 * Derives blocks-per-zone from the block device, sizes the per-device
 * zone arrays, and runs a zone report to fill in the sequential-zone
 * bitmap and per-zone capacities.  Returns 0 or a negative errno; on
 * error, partially allocated arrays are left for the caller's device
 * teardown path to free.
 */
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	struct f2fs_report_zones_args rep_zone_arg;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	/* all zoned members must share the same zone size */
	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	/* round up for a trailing partial zone */
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	/* one bit per zone: tracks which zones are sequential-write-only */
	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	/* Get block zones type and zone-capacity */
	FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
					FDEV(devi).nr_blkz * sizeof(block_t),
					GFP_KERNEL);
	if (!FDEV(devi).zone_capacity_blocks)
		return -ENOMEM;

	rep_zone_arg.dev = &FDEV(devi);
	rep_zone_arg.zone_cap_mismatch = false;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;

	/* capacity == length everywhere: the per-zone table is redundant */
	if (!rep_zone_arg.zone_cap_mismatch) {
		kfree(FDEV(devi).zone_capacity_blocks);
		FDEV(devi).zone_capacity_blocks = NULL;
	}

	return 0;
}
3589*4882a593Smuzhiyun #endif
3590*4882a593Smuzhiyun
/*
 * Read the f2fs raw super block.
 * Since there are two copies of the super block, read both and use the
 * first valid one.  If either copy is broken, set the recovery flag so
 * the caller knows the damaged copy should be rewritten.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	/* try both on-disk copies; keep the first one that passes checks */
	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			/* remember that this copy needs to be rewritten */
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			*recovery = 1;
			continue;
		}

		/* first valid copy wins; keep scanning to set *recovery */
		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		/* a valid copy exists: clear any error from the broken one */
		err = 0;

	return err;
}
3648*4882a593Smuzhiyun
/*
 * f2fs_commit_super - write the in-memory superblock to both on-disk copies
 * @sbi:     f2fs-private super block info
 * @recover: true when repairing a broken copy found at mount time; in that
 *           case only the backup copy is rewritten and the checksum is
 *           left untouched.
 *
 * Returns 0 on success or a negative errno.
 */
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	/* nothing can be written while the device or mount is read-only */
	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* refresh the superblock checksum before a normal commit */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		__u32 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));

		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* update the backup copy first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* in recovery mode only the backup is repaired; stop here */
	if (recover || err)
		return err;

	/* now update the currently-valid copy */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
3687*4882a593Smuzhiyun
/*
 * f2fs_handle_stop - persist the reason the filesystem was stopped
 * @sbi:    f2fs-private super block info
 * @reason: index into raw_super->s_stop_reason[]; must be < MAX_STOP_REASON
 *
 * Bumps the per-reason stop counter in the superblock (saturating at 255
 * so the byte-wide counter never wraps) and commits the superblock so the
 * record survives a reboot.  A failed commit is logged but not propagated.
 */
void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	int err;

	f2fs_bug_on(sbi, reason >= MAX_STOP_REASON);

	f2fs_down_write(&sbi->sb_lock);

	/* saturating increment: stop at (1 << BITS_PER_BYTE) - 1 == 255 */
	if (raw_super->s_stop_reason[reason] < ((1 << BITS_PER_BYTE) - 1))
		raw_super->s_stop_reason[reason]++;

	err = f2fs_commit_super(sbi, false);
	if (err)
		f2fs_err(sbi, "f2fs_commit_super fails to record reason:%u err:%d",
								reason, err);

	f2fs_up_write(&sbi->sb_lock);
}
3707*4882a593Smuzhiyun
/*
 * f2fs_scan_devices - open and describe every block device in this volume
 *
 * Builds sbi->devs from the RDEV() table in the superblock.  Handles three
 * cases: a plain single device (nothing to do), a single zoned device, and
 * a multi-device volume.  On any error, sbi->s_ndevs already counts the
 * devices opened so far, so the caller's teardown can release them.
 * Returns 0 or a negative errno.
 */
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		/* plain single device: no per-device table needed */
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		/* an empty path terminates the device table */
		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				/* first device also covers segment0_blkaddr */
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				/* later devices continue the block range */
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		/* host-managed zoned devices require the blkzoned feature */
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
3799*4882a593Smuzhiyun
/*
 * f2fs_setup_casefold - load the unicode casefolding table if required
 *
 * When the superblock carries the casefold feature, looks up and loads
 * the on-disk encoding (UTF-8 tables) and attaches it to the VFS super
 * block.  Without CONFIG_UNICODE, a casefold-enabled image cannot be
 * mounted at all.  Returns 0 or a negative errno.
 */
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_UNICODE
	/* skip if already set up (e.g. on remount) */
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
					  &encoding_flags)) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			 "%s-%s with flags 0x%hx", encoding_info->name,
			 encoding_info->version?:"\b", encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}
3839*4882a593Smuzhiyun
f2fs_tuning_parameters(struct f2fs_sb_info * sbi)3840*4882a593Smuzhiyun static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
3841*4882a593Smuzhiyun {
3842*4882a593Smuzhiyun struct f2fs_sm_info *sm_i = SM_I(sbi);
3843*4882a593Smuzhiyun
3844*4882a593Smuzhiyun /* adjust parameters according to the volume size */
3845*4882a593Smuzhiyun if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3846*4882a593Smuzhiyun F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3847*4882a593Smuzhiyun sm_i->dcc_info->discard_granularity = 1;
3848*4882a593Smuzhiyun sm_i->ipu_policy = 1 << F2FS_IPU_FORCE |
3849*4882a593Smuzhiyun 1 << F2FS_IPU_HONOR_OPU_WRITE;
3850*4882a593Smuzhiyun }
3851*4882a593Smuzhiyun
3852*4882a593Smuzhiyun sbi->readdir_ra = 1;
3853*4882a593Smuzhiyun }
3854*4882a593Smuzhiyun
f2fs_fill_super(struct super_block * sb,void * data,int silent)3855*4882a593Smuzhiyun static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
3856*4882a593Smuzhiyun {
3857*4882a593Smuzhiyun struct f2fs_sb_info *sbi;
3858*4882a593Smuzhiyun struct f2fs_super_block *raw_super;
3859*4882a593Smuzhiyun struct inode *root;
3860*4882a593Smuzhiyun int err;
3861*4882a593Smuzhiyun bool skip_recovery = false, need_fsck = false;
3862*4882a593Smuzhiyun char *options = NULL;
3863*4882a593Smuzhiyun int recovery, i, valid_super_block;
3864*4882a593Smuzhiyun struct curseg_info *seg_i;
3865*4882a593Smuzhiyun int retry_cnt = 1;
3866*4882a593Smuzhiyun
3867*4882a593Smuzhiyun try_onemore:
3868*4882a593Smuzhiyun err = -EINVAL;
3869*4882a593Smuzhiyun raw_super = NULL;
3870*4882a593Smuzhiyun valid_super_block = -1;
3871*4882a593Smuzhiyun recovery = 0;
3872*4882a593Smuzhiyun
3873*4882a593Smuzhiyun /* allocate memory for f2fs-specific super block info */
3874*4882a593Smuzhiyun sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
3875*4882a593Smuzhiyun if (!sbi)
3876*4882a593Smuzhiyun return -ENOMEM;
3877*4882a593Smuzhiyun
3878*4882a593Smuzhiyun sbi->sb = sb;
3879*4882a593Smuzhiyun
3880*4882a593Smuzhiyun /* Load the checksum driver */
3881*4882a593Smuzhiyun sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3882*4882a593Smuzhiyun if (IS_ERR(sbi->s_chksum_driver)) {
3883*4882a593Smuzhiyun f2fs_err(sbi, "Cannot load crc32 driver.");
3884*4882a593Smuzhiyun err = PTR_ERR(sbi->s_chksum_driver);
3885*4882a593Smuzhiyun sbi->s_chksum_driver = NULL;
3886*4882a593Smuzhiyun goto free_sbi;
3887*4882a593Smuzhiyun }
3888*4882a593Smuzhiyun
3889*4882a593Smuzhiyun /* set a block size */
3890*4882a593Smuzhiyun if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3891*4882a593Smuzhiyun f2fs_err(sbi, "unable to set blocksize");
3892*4882a593Smuzhiyun goto free_sbi;
3893*4882a593Smuzhiyun }
3894*4882a593Smuzhiyun
3895*4882a593Smuzhiyun err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
3896*4882a593Smuzhiyun &recovery);
3897*4882a593Smuzhiyun if (err)
3898*4882a593Smuzhiyun goto free_sbi;
3899*4882a593Smuzhiyun
3900*4882a593Smuzhiyun sb->s_fs_info = sbi;
3901*4882a593Smuzhiyun sbi->raw_super = raw_super;
3902*4882a593Smuzhiyun
3903*4882a593Smuzhiyun /* precompute checksum seed for metadata */
3904*4882a593Smuzhiyun if (f2fs_sb_has_inode_chksum(sbi))
3905*4882a593Smuzhiyun sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
3906*4882a593Smuzhiyun sizeof(raw_super->uuid));
3907*4882a593Smuzhiyun
3908*4882a593Smuzhiyun default_options(sbi);
3909*4882a593Smuzhiyun /* parse mount options */
3910*4882a593Smuzhiyun options = kstrdup((const char *)data, GFP_KERNEL);
3911*4882a593Smuzhiyun if (data && !options) {
3912*4882a593Smuzhiyun err = -ENOMEM;
3913*4882a593Smuzhiyun goto free_sb_buf;
3914*4882a593Smuzhiyun }
3915*4882a593Smuzhiyun
3916*4882a593Smuzhiyun err = parse_options(sb, options, false);
3917*4882a593Smuzhiyun if (err)
3918*4882a593Smuzhiyun goto free_options;
3919*4882a593Smuzhiyun
3920*4882a593Smuzhiyun sb->s_maxbytes = max_file_blocks(NULL) <<
3921*4882a593Smuzhiyun le32_to_cpu(raw_super->log_blocksize);
3922*4882a593Smuzhiyun sb->s_max_links = F2FS_LINK_MAX;
3923*4882a593Smuzhiyun
3924*4882a593Smuzhiyun err = f2fs_setup_casefold(sbi);
3925*4882a593Smuzhiyun if (err)
3926*4882a593Smuzhiyun goto free_options;
3927*4882a593Smuzhiyun
3928*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
3929*4882a593Smuzhiyun sb->dq_op = &f2fs_quota_operations;
3930*4882a593Smuzhiyun sb->s_qcop = &f2fs_quotactl_ops;
3931*4882a593Smuzhiyun sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3932*4882a593Smuzhiyun
3933*4882a593Smuzhiyun if (f2fs_sb_has_quota_ino(sbi)) {
3934*4882a593Smuzhiyun for (i = 0; i < MAXQUOTAS; i++) {
3935*4882a593Smuzhiyun if (f2fs_qf_ino(sbi->sb, i))
3936*4882a593Smuzhiyun sbi->nquota_files++;
3937*4882a593Smuzhiyun }
3938*4882a593Smuzhiyun }
3939*4882a593Smuzhiyun #endif
3940*4882a593Smuzhiyun
3941*4882a593Smuzhiyun sb->s_op = &f2fs_sops;
3942*4882a593Smuzhiyun #ifdef CONFIG_FS_ENCRYPTION
3943*4882a593Smuzhiyun sb->s_cop = &f2fs_cryptops;
3944*4882a593Smuzhiyun #endif
3945*4882a593Smuzhiyun #ifdef CONFIG_FS_VERITY
3946*4882a593Smuzhiyun sb->s_vop = &f2fs_verityops;
3947*4882a593Smuzhiyun #endif
3948*4882a593Smuzhiyun sb->s_xattr = f2fs_xattr_handlers;
3949*4882a593Smuzhiyun sb->s_export_op = &f2fs_export_ops;
3950*4882a593Smuzhiyun sb->s_magic = F2FS_SUPER_MAGIC;
3951*4882a593Smuzhiyun sb->s_time_gran = 1;
3952*4882a593Smuzhiyun sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
3953*4882a593Smuzhiyun (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
3954*4882a593Smuzhiyun memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
3955*4882a593Smuzhiyun sb->s_iflags |= SB_I_CGROUPWB;
3956*4882a593Smuzhiyun
3957*4882a593Smuzhiyun /* init f2fs-specific super block info */
3958*4882a593Smuzhiyun sbi->valid_super_block = valid_super_block;
3959*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->gc_lock);
3960*4882a593Smuzhiyun mutex_init(&sbi->writepages);
3961*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->cp_global_sem);
3962*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->node_write);
3963*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->node_change);
3964*4882a593Smuzhiyun
3965*4882a593Smuzhiyun /* disallow all the data/node/meta page writes */
3966*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_POR_DOING);
3967*4882a593Smuzhiyun spin_lock_init(&sbi->stat_lock);
3968*4882a593Smuzhiyun
3969*4882a593Smuzhiyun /* init iostat info */
3970*4882a593Smuzhiyun spin_lock_init(&sbi->iostat_lock);
3971*4882a593Smuzhiyun sbi->iostat_enable = false;
3972*4882a593Smuzhiyun sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
3973*4882a593Smuzhiyun
3974*4882a593Smuzhiyun for (i = 0; i < NR_PAGE_TYPE; i++) {
3975*4882a593Smuzhiyun int n = (i == META) ? 1 : NR_TEMP_TYPE;
3976*4882a593Smuzhiyun int j;
3977*4882a593Smuzhiyun
3978*4882a593Smuzhiyun sbi->write_io[i] =
3979*4882a593Smuzhiyun f2fs_kmalloc(sbi,
3980*4882a593Smuzhiyun array_size(n,
3981*4882a593Smuzhiyun sizeof(struct f2fs_bio_info)),
3982*4882a593Smuzhiyun GFP_KERNEL);
3983*4882a593Smuzhiyun if (!sbi->write_io[i]) {
3984*4882a593Smuzhiyun err = -ENOMEM;
3985*4882a593Smuzhiyun goto free_bio_info;
3986*4882a593Smuzhiyun }
3987*4882a593Smuzhiyun
3988*4882a593Smuzhiyun for (j = HOT; j < n; j++) {
3989*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
3990*4882a593Smuzhiyun sbi->write_io[i][j].sbi = sbi;
3991*4882a593Smuzhiyun sbi->write_io[i][j].bio = NULL;
3992*4882a593Smuzhiyun spin_lock_init(&sbi->write_io[i][j].io_lock);
3993*4882a593Smuzhiyun INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
3994*4882a593Smuzhiyun INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
3995*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
3996*4882a593Smuzhiyun }
3997*4882a593Smuzhiyun }
3998*4882a593Smuzhiyun
3999*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->cp_rwsem);
4000*4882a593Smuzhiyun init_f2fs_rwsem(&sbi->quota_sem);
4001*4882a593Smuzhiyun init_waitqueue_head(&sbi->cp_wait);
4002*4882a593Smuzhiyun init_sb_info(sbi);
4003*4882a593Smuzhiyun
4004*4882a593Smuzhiyun err = init_percpu_info(sbi);
4005*4882a593Smuzhiyun if (err)
4006*4882a593Smuzhiyun goto free_bio_info;
4007*4882a593Smuzhiyun
4008*4882a593Smuzhiyun if (F2FS_IO_ALIGNED(sbi)) {
4009*4882a593Smuzhiyun sbi->write_io_dummy =
4010*4882a593Smuzhiyun mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
4011*4882a593Smuzhiyun if (!sbi->write_io_dummy) {
4012*4882a593Smuzhiyun err = -ENOMEM;
4013*4882a593Smuzhiyun goto free_percpu;
4014*4882a593Smuzhiyun }
4015*4882a593Smuzhiyun }
4016*4882a593Smuzhiyun
4017*4882a593Smuzhiyun /* init per sbi slab cache */
4018*4882a593Smuzhiyun err = f2fs_init_xattr_caches(sbi);
4019*4882a593Smuzhiyun if (err)
4020*4882a593Smuzhiyun goto free_io_dummy;
4021*4882a593Smuzhiyun err = f2fs_init_page_array_cache(sbi);
4022*4882a593Smuzhiyun if (err)
4023*4882a593Smuzhiyun goto free_xattr_cache;
4024*4882a593Smuzhiyun
4025*4882a593Smuzhiyun /* get an inode for meta space */
4026*4882a593Smuzhiyun sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
4027*4882a593Smuzhiyun if (IS_ERR(sbi->meta_inode)) {
4028*4882a593Smuzhiyun f2fs_err(sbi, "Failed to read F2FS meta data inode");
4029*4882a593Smuzhiyun err = PTR_ERR(sbi->meta_inode);
4030*4882a593Smuzhiyun goto free_page_array_cache;
4031*4882a593Smuzhiyun }
4032*4882a593Smuzhiyun
4033*4882a593Smuzhiyun err = f2fs_get_valid_checkpoint(sbi);
4034*4882a593Smuzhiyun if (err) {
4035*4882a593Smuzhiyun f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
4036*4882a593Smuzhiyun goto free_meta_inode;
4037*4882a593Smuzhiyun }
4038*4882a593Smuzhiyun
4039*4882a593Smuzhiyun if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
4040*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
4041*4882a593Smuzhiyun if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
4042*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4043*4882a593Smuzhiyun sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
4044*4882a593Smuzhiyun }
4045*4882a593Smuzhiyun
4046*4882a593Smuzhiyun if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
4047*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_NEED_FSCK);
4048*4882a593Smuzhiyun
4049*4882a593Smuzhiyun /* Initialize device list */
4050*4882a593Smuzhiyun err = f2fs_scan_devices(sbi);
4051*4882a593Smuzhiyun if (err) {
4052*4882a593Smuzhiyun f2fs_err(sbi, "Failed to find devices");
4053*4882a593Smuzhiyun goto free_devices;
4054*4882a593Smuzhiyun }
4055*4882a593Smuzhiyun
4056*4882a593Smuzhiyun err = f2fs_init_post_read_wq(sbi);
4057*4882a593Smuzhiyun if (err) {
4058*4882a593Smuzhiyun f2fs_err(sbi, "Failed to initialize post read workqueue");
4059*4882a593Smuzhiyun goto free_devices;
4060*4882a593Smuzhiyun }
4061*4882a593Smuzhiyun
4062*4882a593Smuzhiyun sbi->total_valid_node_count =
4063*4882a593Smuzhiyun le32_to_cpu(sbi->ckpt->valid_node_count);
4064*4882a593Smuzhiyun percpu_counter_set(&sbi->total_valid_inode_count,
4065*4882a593Smuzhiyun le32_to_cpu(sbi->ckpt->valid_inode_count));
4066*4882a593Smuzhiyun sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
4067*4882a593Smuzhiyun sbi->total_valid_block_count =
4068*4882a593Smuzhiyun le64_to_cpu(sbi->ckpt->valid_block_count);
4069*4882a593Smuzhiyun sbi->last_valid_block_count = sbi->total_valid_block_count;
4070*4882a593Smuzhiyun sbi->reserved_blocks = 0;
4071*4882a593Smuzhiyun sbi->current_reserved_blocks = 0;
4072*4882a593Smuzhiyun limit_reserve_root(sbi);
4073*4882a593Smuzhiyun adjust_unusable_cap_perc(sbi);
4074*4882a593Smuzhiyun
4075*4882a593Smuzhiyun for (i = 0; i < NR_INODE_TYPE; i++) {
4076*4882a593Smuzhiyun INIT_LIST_HEAD(&sbi->inode_list[i]);
4077*4882a593Smuzhiyun spin_lock_init(&sbi->inode_lock[i]);
4078*4882a593Smuzhiyun }
4079*4882a593Smuzhiyun mutex_init(&sbi->flush_lock);
4080*4882a593Smuzhiyun
4081*4882a593Smuzhiyun f2fs_init_extent_cache_info(sbi);
4082*4882a593Smuzhiyun
4083*4882a593Smuzhiyun f2fs_init_ino_entry_info(sbi);
4084*4882a593Smuzhiyun
4085*4882a593Smuzhiyun f2fs_init_fsync_node_info(sbi);
4086*4882a593Smuzhiyun
4087*4882a593Smuzhiyun /* setup checkpoint request control and start checkpoint issue thread */
4088*4882a593Smuzhiyun f2fs_init_ckpt_req_control(sbi);
4089*4882a593Smuzhiyun if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
4090*4882a593Smuzhiyun test_opt(sbi, MERGE_CHECKPOINT)) {
4091*4882a593Smuzhiyun err = f2fs_start_ckpt_thread(sbi);
4092*4882a593Smuzhiyun if (err) {
4093*4882a593Smuzhiyun f2fs_err(sbi,
4094*4882a593Smuzhiyun "Failed to start F2FS issue_checkpoint_thread (%d)",
4095*4882a593Smuzhiyun err);
4096*4882a593Smuzhiyun goto stop_ckpt_thread;
4097*4882a593Smuzhiyun }
4098*4882a593Smuzhiyun }
4099*4882a593Smuzhiyun
4100*4882a593Smuzhiyun /* setup f2fs internal modules */
4101*4882a593Smuzhiyun err = f2fs_build_segment_manager(sbi);
4102*4882a593Smuzhiyun if (err) {
4103*4882a593Smuzhiyun f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
4104*4882a593Smuzhiyun err);
4105*4882a593Smuzhiyun goto free_sm;
4106*4882a593Smuzhiyun }
4107*4882a593Smuzhiyun err = f2fs_build_node_manager(sbi);
4108*4882a593Smuzhiyun if (err) {
4109*4882a593Smuzhiyun f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
4110*4882a593Smuzhiyun err);
4111*4882a593Smuzhiyun goto free_nm;
4112*4882a593Smuzhiyun }
4113*4882a593Smuzhiyun
4114*4882a593Smuzhiyun err = adjust_reserved_segment(sbi);
4115*4882a593Smuzhiyun if (err)
4116*4882a593Smuzhiyun goto free_nm;
4117*4882a593Smuzhiyun
4118*4882a593Smuzhiyun /* For write statistics */
4119*4882a593Smuzhiyun sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
4120*4882a593Smuzhiyun
4121*4882a593Smuzhiyun /* Read accumulated write IO statistics if exists */
4122*4882a593Smuzhiyun seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
4123*4882a593Smuzhiyun if (__exist_node_summaries(sbi))
4124*4882a593Smuzhiyun sbi->kbytes_written =
4125*4882a593Smuzhiyun le64_to_cpu(seg_i->journal->info.kbytes_written);
4126*4882a593Smuzhiyun
4127*4882a593Smuzhiyun f2fs_build_gc_manager(sbi);
4128*4882a593Smuzhiyun
4129*4882a593Smuzhiyun atomic_set(&sbi->no_cp_fsync_pages, 0);
4130*4882a593Smuzhiyun
4131*4882a593Smuzhiyun err = f2fs_build_stats(sbi);
4132*4882a593Smuzhiyun if (err)
4133*4882a593Smuzhiyun goto free_nm;
4134*4882a593Smuzhiyun
4135*4882a593Smuzhiyun /* get an inode for node space */
4136*4882a593Smuzhiyun sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
4137*4882a593Smuzhiyun if (IS_ERR(sbi->node_inode)) {
4138*4882a593Smuzhiyun f2fs_err(sbi, "Failed to read node inode");
4139*4882a593Smuzhiyun err = PTR_ERR(sbi->node_inode);
4140*4882a593Smuzhiyun goto free_stats;
4141*4882a593Smuzhiyun }
4142*4882a593Smuzhiyun
4143*4882a593Smuzhiyun /* read root inode and dentry */
4144*4882a593Smuzhiyun root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
4145*4882a593Smuzhiyun if (IS_ERR(root)) {
4146*4882a593Smuzhiyun f2fs_err(sbi, "Failed to read root inode");
4147*4882a593Smuzhiyun err = PTR_ERR(root);
4148*4882a593Smuzhiyun goto free_node_inode;
4149*4882a593Smuzhiyun }
4150*4882a593Smuzhiyun if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
4151*4882a593Smuzhiyun !root->i_size || !root->i_nlink) {
4152*4882a593Smuzhiyun iput(root);
4153*4882a593Smuzhiyun err = -EINVAL;
4154*4882a593Smuzhiyun goto free_node_inode;
4155*4882a593Smuzhiyun }
4156*4882a593Smuzhiyun
4157*4882a593Smuzhiyun sb->s_root = d_make_root(root); /* allocate root dentry */
4158*4882a593Smuzhiyun if (!sb->s_root) {
4159*4882a593Smuzhiyun err = -ENOMEM;
4160*4882a593Smuzhiyun goto free_node_inode;
4161*4882a593Smuzhiyun }
4162*4882a593Smuzhiyun
4163*4882a593Smuzhiyun err = f2fs_init_compress_inode(sbi);
4164*4882a593Smuzhiyun if (err)
4165*4882a593Smuzhiyun goto free_root_inode;
4166*4882a593Smuzhiyun
4167*4882a593Smuzhiyun err = f2fs_register_sysfs(sbi);
4168*4882a593Smuzhiyun if (err)
4169*4882a593Smuzhiyun goto free_compress_inode;
4170*4882a593Smuzhiyun
4171*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
4172*4882a593Smuzhiyun /* Enable quota usage during mount */
4173*4882a593Smuzhiyun if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
4174*4882a593Smuzhiyun err = f2fs_enable_quotas(sb);
4175*4882a593Smuzhiyun if (err)
4176*4882a593Smuzhiyun f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
4177*4882a593Smuzhiyun }
4178*4882a593Smuzhiyun #endif
4179*4882a593Smuzhiyun /* if there are any orphan inodes, free them */
4180*4882a593Smuzhiyun err = f2fs_recover_orphan_inodes(sbi);
4181*4882a593Smuzhiyun if (err)
4182*4882a593Smuzhiyun goto free_meta;
4183*4882a593Smuzhiyun
4184*4882a593Smuzhiyun if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
4185*4882a593Smuzhiyun goto reset_checkpoint;
4186*4882a593Smuzhiyun
4187*4882a593Smuzhiyun /* recover fsynced data */
4188*4882a593Smuzhiyun if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
4189*4882a593Smuzhiyun !test_opt(sbi, NORECOVERY)) {
4190*4882a593Smuzhiyun /*
4191*4882a593Smuzhiyun * mount should be failed, when device has readonly mode, and
4192*4882a593Smuzhiyun * previous checkpoint was not done by clean system shutdown.
4193*4882a593Smuzhiyun */
4194*4882a593Smuzhiyun if (f2fs_hw_is_readonly(sbi)) {
4195*4882a593Smuzhiyun if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4196*4882a593Smuzhiyun err = f2fs_recover_fsync_data(sbi, true);
4197*4882a593Smuzhiyun if (err > 0) {
4198*4882a593Smuzhiyun err = -EROFS;
4199*4882a593Smuzhiyun f2fs_err(sbi, "Need to recover fsync data, but "
4200*4882a593Smuzhiyun "write access unavailable, please try "
4201*4882a593Smuzhiyun "mount w/ disable_roll_forward or norecovery");
4202*4882a593Smuzhiyun }
4203*4882a593Smuzhiyun if (err < 0)
4204*4882a593Smuzhiyun goto free_meta;
4205*4882a593Smuzhiyun }
4206*4882a593Smuzhiyun f2fs_info(sbi, "write access unavailable, skipping recovery");
4207*4882a593Smuzhiyun goto reset_checkpoint;
4208*4882a593Smuzhiyun }
4209*4882a593Smuzhiyun
4210*4882a593Smuzhiyun if (need_fsck)
4211*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_NEED_FSCK);
4212*4882a593Smuzhiyun
4213*4882a593Smuzhiyun if (skip_recovery)
4214*4882a593Smuzhiyun goto reset_checkpoint;
4215*4882a593Smuzhiyun
4216*4882a593Smuzhiyun err = f2fs_recover_fsync_data(sbi, false);
4217*4882a593Smuzhiyun if (err < 0) {
4218*4882a593Smuzhiyun if (err != -ENOMEM)
4219*4882a593Smuzhiyun skip_recovery = true;
4220*4882a593Smuzhiyun need_fsck = true;
4221*4882a593Smuzhiyun f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
4222*4882a593Smuzhiyun err);
4223*4882a593Smuzhiyun goto free_meta;
4224*4882a593Smuzhiyun }
4225*4882a593Smuzhiyun } else {
4226*4882a593Smuzhiyun err = f2fs_recover_fsync_data(sbi, true);
4227*4882a593Smuzhiyun
4228*4882a593Smuzhiyun if (!f2fs_readonly(sb) && err > 0) {
4229*4882a593Smuzhiyun err = -EINVAL;
4230*4882a593Smuzhiyun f2fs_err(sbi, "Need to recover fsync data");
4231*4882a593Smuzhiyun goto free_meta;
4232*4882a593Smuzhiyun }
4233*4882a593Smuzhiyun }
4234*4882a593Smuzhiyun
4235*4882a593Smuzhiyun /*
4236*4882a593Smuzhiyun * If the f2fs is not readonly and fsync data recovery succeeds,
4237*4882a593Smuzhiyun * check zoned block devices' write pointer consistency.
4238*4882a593Smuzhiyun */
4239*4882a593Smuzhiyun if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
4240*4882a593Smuzhiyun err = f2fs_check_write_pointer(sbi);
4241*4882a593Smuzhiyun if (err)
4242*4882a593Smuzhiyun goto free_meta;
4243*4882a593Smuzhiyun }
4244*4882a593Smuzhiyun
4245*4882a593Smuzhiyun reset_checkpoint:
4246*4882a593Smuzhiyun f2fs_init_inmem_curseg(sbi);
4247*4882a593Smuzhiyun
4248*4882a593Smuzhiyun /* f2fs_recover_fsync_data() cleared this already */
4249*4882a593Smuzhiyun clear_sbi_flag(sbi, SBI_POR_DOING);
4250*4882a593Smuzhiyun
4251*4882a593Smuzhiyun if (test_opt(sbi, DISABLE_CHECKPOINT)) {
4252*4882a593Smuzhiyun err = f2fs_disable_checkpoint(sbi);
4253*4882a593Smuzhiyun if (err)
4254*4882a593Smuzhiyun goto sync_free_meta;
4255*4882a593Smuzhiyun } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
4256*4882a593Smuzhiyun f2fs_enable_checkpoint(sbi);
4257*4882a593Smuzhiyun }
4258*4882a593Smuzhiyun
4259*4882a593Smuzhiyun /*
4260*4882a593Smuzhiyun * If filesystem is not mounted as read-only then
4261*4882a593Smuzhiyun * do start the gc_thread.
4262*4882a593Smuzhiyun */
4263*4882a593Smuzhiyun if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
4264*4882a593Smuzhiyun test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
4265*4882a593Smuzhiyun /* After POR, we can run background GC thread.*/
4266*4882a593Smuzhiyun err = f2fs_start_gc_thread(sbi);
4267*4882a593Smuzhiyun if (err)
4268*4882a593Smuzhiyun goto sync_free_meta;
4269*4882a593Smuzhiyun }
4270*4882a593Smuzhiyun kvfree(options);
4271*4882a593Smuzhiyun
4272*4882a593Smuzhiyun /* recover broken superblock */
4273*4882a593Smuzhiyun if (recovery) {
4274*4882a593Smuzhiyun err = f2fs_commit_super(sbi, true);
4275*4882a593Smuzhiyun f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
4276*4882a593Smuzhiyun sbi->valid_super_block ? 1 : 2, err);
4277*4882a593Smuzhiyun }
4278*4882a593Smuzhiyun
4279*4882a593Smuzhiyun f2fs_join_shrinker(sbi);
4280*4882a593Smuzhiyun
4281*4882a593Smuzhiyun f2fs_tuning_parameters(sbi);
4282*4882a593Smuzhiyun
4283*4882a593Smuzhiyun f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
4284*4882a593Smuzhiyun cur_cp_version(F2FS_CKPT(sbi)));
4285*4882a593Smuzhiyun f2fs_update_time(sbi, CP_TIME);
4286*4882a593Smuzhiyun f2fs_update_time(sbi, REQ_TIME);
4287*4882a593Smuzhiyun clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4288*4882a593Smuzhiyun return 0;
4289*4882a593Smuzhiyun
4290*4882a593Smuzhiyun sync_free_meta:
4291*4882a593Smuzhiyun /* safe to flush all the data */
4292*4882a593Smuzhiyun sync_filesystem(sbi->sb);
4293*4882a593Smuzhiyun retry_cnt = 0;
4294*4882a593Smuzhiyun
4295*4882a593Smuzhiyun free_meta:
4296*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
4297*4882a593Smuzhiyun f2fs_truncate_quota_inode_pages(sb);
4298*4882a593Smuzhiyun if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
4299*4882a593Smuzhiyun f2fs_quota_off_umount(sbi->sb);
4300*4882a593Smuzhiyun #endif
4301*4882a593Smuzhiyun /*
4302*4882a593Smuzhiyun * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
4303*4882a593Smuzhiyun * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
4304*4882a593Smuzhiyun * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
4305*4882a593Smuzhiyun * falls into an infinite loop in f2fs_sync_meta_pages().
4306*4882a593Smuzhiyun */
4307*4882a593Smuzhiyun truncate_inode_pages_final(META_MAPPING(sbi));
4308*4882a593Smuzhiyun /* evict some inodes being cached by GC */
4309*4882a593Smuzhiyun evict_inodes(sb);
4310*4882a593Smuzhiyun f2fs_unregister_sysfs(sbi);
4311*4882a593Smuzhiyun free_compress_inode:
4312*4882a593Smuzhiyun f2fs_destroy_compress_inode(sbi);
4313*4882a593Smuzhiyun free_root_inode:
4314*4882a593Smuzhiyun dput(sb->s_root);
4315*4882a593Smuzhiyun sb->s_root = NULL;
4316*4882a593Smuzhiyun free_node_inode:
4317*4882a593Smuzhiyun f2fs_release_ino_entry(sbi, true);
4318*4882a593Smuzhiyun truncate_inode_pages_final(NODE_MAPPING(sbi));
4319*4882a593Smuzhiyun iput(sbi->node_inode);
4320*4882a593Smuzhiyun sbi->node_inode = NULL;
4321*4882a593Smuzhiyun free_stats:
4322*4882a593Smuzhiyun f2fs_destroy_stats(sbi);
4323*4882a593Smuzhiyun free_nm:
4324*4882a593Smuzhiyun /* stop discard thread before destroying node manager */
4325*4882a593Smuzhiyun f2fs_stop_discard_thread(sbi);
4326*4882a593Smuzhiyun f2fs_destroy_node_manager(sbi);
4327*4882a593Smuzhiyun free_sm:
4328*4882a593Smuzhiyun f2fs_destroy_segment_manager(sbi);
4329*4882a593Smuzhiyun f2fs_destroy_post_read_wq(sbi);
4330*4882a593Smuzhiyun stop_ckpt_thread:
4331*4882a593Smuzhiyun f2fs_stop_ckpt_thread(sbi);
4332*4882a593Smuzhiyun free_devices:
4333*4882a593Smuzhiyun destroy_device_list(sbi);
4334*4882a593Smuzhiyun kvfree(sbi->ckpt);
4335*4882a593Smuzhiyun free_meta_inode:
4336*4882a593Smuzhiyun make_bad_inode(sbi->meta_inode);
4337*4882a593Smuzhiyun iput(sbi->meta_inode);
4338*4882a593Smuzhiyun sbi->meta_inode = NULL;
4339*4882a593Smuzhiyun free_page_array_cache:
4340*4882a593Smuzhiyun f2fs_destroy_page_array_cache(sbi);
4341*4882a593Smuzhiyun free_xattr_cache:
4342*4882a593Smuzhiyun f2fs_destroy_xattr_caches(sbi);
4343*4882a593Smuzhiyun free_io_dummy:
4344*4882a593Smuzhiyun mempool_destroy(sbi->write_io_dummy);
4345*4882a593Smuzhiyun free_percpu:
4346*4882a593Smuzhiyun destroy_percpu_info(sbi);
4347*4882a593Smuzhiyun free_bio_info:
4348*4882a593Smuzhiyun for (i = 0; i < NR_PAGE_TYPE; i++)
4349*4882a593Smuzhiyun kvfree(sbi->write_io[i]);
4350*4882a593Smuzhiyun
4351*4882a593Smuzhiyun #ifdef CONFIG_UNICODE
4352*4882a593Smuzhiyun utf8_unload(sb->s_encoding);
4353*4882a593Smuzhiyun sb->s_encoding = NULL;
4354*4882a593Smuzhiyun #endif
4355*4882a593Smuzhiyun free_options:
4356*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
4357*4882a593Smuzhiyun for (i = 0; i < MAXQUOTAS; i++)
4358*4882a593Smuzhiyun kfree(F2FS_OPTION(sbi).s_qf_names[i]);
4359*4882a593Smuzhiyun #endif
4360*4882a593Smuzhiyun fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
4361*4882a593Smuzhiyun kvfree(options);
4362*4882a593Smuzhiyun free_sb_buf:
4363*4882a593Smuzhiyun kfree(raw_super);
4364*4882a593Smuzhiyun free_sbi:
4365*4882a593Smuzhiyun if (sbi->s_chksum_driver)
4366*4882a593Smuzhiyun crypto_free_shash(sbi->s_chksum_driver);
4367*4882a593Smuzhiyun kfree(sbi);
4368*4882a593Smuzhiyun
4369*4882a593Smuzhiyun /* give only one another chance */
4370*4882a593Smuzhiyun if (retry_cnt > 0 && skip_recovery) {
4371*4882a593Smuzhiyun retry_cnt--;
4372*4882a593Smuzhiyun shrink_dcache_sb(sb);
4373*4882a593Smuzhiyun goto try_onemore;
4374*4882a593Smuzhiyun }
4375*4882a593Smuzhiyun return err;
4376*4882a593Smuzhiyun }
4377*4882a593Smuzhiyun
/*
 * .mount hook for f2fs: standard block-device mount entry point that
 * delegates all superblock construction to f2fs_fill_super() via
 * mount_bdev().
 */
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
4383*4882a593Smuzhiyun
/*
 * .kill_sb hook for f2fs.
 *
 * sb->s_root is only set once f2fs_fill_super() fully succeeded, so the
 * f2fs-specific shutdown below runs only for a completely mounted
 * filesystem; a failed/aborted mount falls straight through to
 * kill_block_super().
 */
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		/* signal background paths that the fs is shutting down */
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		/* stop background GC and discard before the final checkpoint */
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		/*
		 * latter evict_inode() can bypass checking and invalidating
		 * compress inode cache.
		 */
		if (test_opt(sbi, COMPRESS_CACHE))
			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

		/*
		 * Write one last CP_UMOUNT checkpoint when there is dirty
		 * state, or when the on-disk checkpoint does not already
		 * carry the clean-umount flag.
		 */
		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		/*
		 * NOTE(review): presumably clearing SB_RDONLY here lets the
		 * generic teardown write back the superblock that was
		 * recovered during a read-only mount — confirm against the
		 * SBI_IS_RECOVERED setters in f2fs_fill_super().
		 */
		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}
4415*4882a593Smuzhiyun
/* f2fs is always backed by a block device (FS_REQUIRES_DEV). */
static struct file_system_type f2fs_fs_type = {
	.owner = THIS_MODULE,
	.name = "f2fs",
	.mount = f2fs_mount,
	.kill_sb = kill_f2fs_super,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
4424*4882a593Smuzhiyun
init_inodecache(void)4425*4882a593Smuzhiyun static int __init init_inodecache(void)
4426*4882a593Smuzhiyun {
4427*4882a593Smuzhiyun f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
4428*4882a593Smuzhiyun sizeof(struct f2fs_inode_info), 0,
4429*4882a593Smuzhiyun SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
4430*4882a593Smuzhiyun if (!f2fs_inode_cachep)
4431*4882a593Smuzhiyun return -ENOMEM;
4432*4882a593Smuzhiyun return 0;
4433*4882a593Smuzhiyun }
4434*4882a593Smuzhiyun
destroy_inodecache(void)4435*4882a593Smuzhiyun static void destroy_inodecache(void)
4436*4882a593Smuzhiyun {
4437*4882a593Smuzhiyun /*
4438*4882a593Smuzhiyun * Make sure all delayed rcu free inodes are flushed before we
4439*4882a593Smuzhiyun * destroy cache.
4440*4882a593Smuzhiyun */
4441*4882a593Smuzhiyun rcu_barrier();
4442*4882a593Smuzhiyun kmem_cache_destroy(f2fs_inode_cachep);
4443*4882a593Smuzhiyun }
4444*4882a593Smuzhiyun
init_f2fs_fs(void)4445*4882a593Smuzhiyun static int __init init_f2fs_fs(void)
4446*4882a593Smuzhiyun {
4447*4882a593Smuzhiyun int err;
4448*4882a593Smuzhiyun
4449*4882a593Smuzhiyun if (PAGE_SIZE != F2FS_BLKSIZE) {
4450*4882a593Smuzhiyun printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
4451*4882a593Smuzhiyun PAGE_SIZE, F2FS_BLKSIZE);
4452*4882a593Smuzhiyun return -EINVAL;
4453*4882a593Smuzhiyun }
4454*4882a593Smuzhiyun
4455*4882a593Smuzhiyun err = init_inodecache();
4456*4882a593Smuzhiyun if (err)
4457*4882a593Smuzhiyun goto fail;
4458*4882a593Smuzhiyun err = f2fs_create_node_manager_caches();
4459*4882a593Smuzhiyun if (err)
4460*4882a593Smuzhiyun goto free_inodecache;
4461*4882a593Smuzhiyun err = f2fs_create_segment_manager_caches();
4462*4882a593Smuzhiyun if (err)
4463*4882a593Smuzhiyun goto free_node_manager_caches;
4464*4882a593Smuzhiyun err = f2fs_create_checkpoint_caches();
4465*4882a593Smuzhiyun if (err)
4466*4882a593Smuzhiyun goto free_segment_manager_caches;
4467*4882a593Smuzhiyun err = f2fs_create_recovery_cache();
4468*4882a593Smuzhiyun if (err)
4469*4882a593Smuzhiyun goto free_checkpoint_caches;
4470*4882a593Smuzhiyun err = f2fs_create_extent_cache();
4471*4882a593Smuzhiyun if (err)
4472*4882a593Smuzhiyun goto free_recovery_cache;
4473*4882a593Smuzhiyun err = f2fs_create_garbage_collection_cache();
4474*4882a593Smuzhiyun if (err)
4475*4882a593Smuzhiyun goto free_extent_cache;
4476*4882a593Smuzhiyun err = f2fs_init_sysfs();
4477*4882a593Smuzhiyun if (err)
4478*4882a593Smuzhiyun goto free_garbage_collection_cache;
4479*4882a593Smuzhiyun err = register_shrinker(&f2fs_shrinker_info);
4480*4882a593Smuzhiyun if (err)
4481*4882a593Smuzhiyun goto free_sysfs;
4482*4882a593Smuzhiyun err = register_filesystem(&f2fs_fs_type);
4483*4882a593Smuzhiyun if (err)
4484*4882a593Smuzhiyun goto free_shrinker;
4485*4882a593Smuzhiyun f2fs_create_root_stats();
4486*4882a593Smuzhiyun err = f2fs_init_post_read_processing();
4487*4882a593Smuzhiyun if (err)
4488*4882a593Smuzhiyun goto free_root_stats;
4489*4882a593Smuzhiyun err = f2fs_init_bio_entry_cache();
4490*4882a593Smuzhiyun if (err)
4491*4882a593Smuzhiyun goto free_post_read;
4492*4882a593Smuzhiyun err = f2fs_init_bioset();
4493*4882a593Smuzhiyun if (err)
4494*4882a593Smuzhiyun goto free_bio_enrty_cache;
4495*4882a593Smuzhiyun err = f2fs_init_compress_mempool();
4496*4882a593Smuzhiyun if (err)
4497*4882a593Smuzhiyun goto free_bioset;
4498*4882a593Smuzhiyun err = f2fs_init_compress_cache();
4499*4882a593Smuzhiyun if (err)
4500*4882a593Smuzhiyun goto free_compress_mempool;
4501*4882a593Smuzhiyun err = f2fs_create_casefold_cache();
4502*4882a593Smuzhiyun if (err)
4503*4882a593Smuzhiyun goto free_compress_cache;
4504*4882a593Smuzhiyun return 0;
4505*4882a593Smuzhiyun free_compress_cache:
4506*4882a593Smuzhiyun f2fs_destroy_compress_cache();
4507*4882a593Smuzhiyun free_compress_mempool:
4508*4882a593Smuzhiyun f2fs_destroy_compress_mempool();
4509*4882a593Smuzhiyun free_bioset:
4510*4882a593Smuzhiyun f2fs_destroy_bioset();
4511*4882a593Smuzhiyun free_bio_enrty_cache:
4512*4882a593Smuzhiyun f2fs_destroy_bio_entry_cache();
4513*4882a593Smuzhiyun free_post_read:
4514*4882a593Smuzhiyun f2fs_destroy_post_read_processing();
4515*4882a593Smuzhiyun free_root_stats:
4516*4882a593Smuzhiyun f2fs_destroy_root_stats();
4517*4882a593Smuzhiyun unregister_filesystem(&f2fs_fs_type);
4518*4882a593Smuzhiyun free_shrinker:
4519*4882a593Smuzhiyun unregister_shrinker(&f2fs_shrinker_info);
4520*4882a593Smuzhiyun free_sysfs:
4521*4882a593Smuzhiyun f2fs_exit_sysfs();
4522*4882a593Smuzhiyun free_garbage_collection_cache:
4523*4882a593Smuzhiyun f2fs_destroy_garbage_collection_cache();
4524*4882a593Smuzhiyun free_extent_cache:
4525*4882a593Smuzhiyun f2fs_destroy_extent_cache();
4526*4882a593Smuzhiyun free_recovery_cache:
4527*4882a593Smuzhiyun f2fs_destroy_recovery_cache();
4528*4882a593Smuzhiyun free_checkpoint_caches:
4529*4882a593Smuzhiyun f2fs_destroy_checkpoint_caches();
4530*4882a593Smuzhiyun free_segment_manager_caches:
4531*4882a593Smuzhiyun f2fs_destroy_segment_manager_caches();
4532*4882a593Smuzhiyun free_node_manager_caches:
4533*4882a593Smuzhiyun f2fs_destroy_node_manager_caches();
4534*4882a593Smuzhiyun free_inodecache:
4535*4882a593Smuzhiyun destroy_inodecache();
4536*4882a593Smuzhiyun fail:
4537*4882a593Smuzhiyun return err;
4538*4882a593Smuzhiyun }
4539*4882a593Smuzhiyun
/*
 * Module exit: tear everything down in the reverse of init_f2fs_fs()'s
 * setup order (casefold cache last-created, destroyed first; inode
 * cache first-created, destroyed last).
 */
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}
4560*4882a593Smuzhiyun
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
/* crc32 must be loadable before mount: metadata checksumming depends on it */
MODULE_SOFTDEP("pre: crc32");
4569*4882a593Smuzhiyun
4570