// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

void _erofs_err(struct super_block *sb, const char *function,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}

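/*
 * The on-disk checksum is a crc32c (seeded with ~0) over the remainder
 * of the first block starting at EROFS_SUPER_OFFSET, computed with the
 * stored checksum field itself zeroed out.
 */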
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
		      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* to allow for x86 boot sectors and other oddities. */
	crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	/* be careful of RCU symlink path */
	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);

	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb,
			  "unidentified incompatible feature %x, please upgrade kernel version",
			   feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}

#ifdef CONFIG_EROFS_FS_ZIP
/* read variable-sized metadata, whose offset will be aligned to 4 bytes */
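/*
 * Each record is laid out as a __le16 byte count at the aligned offset,
 * immediately followed by the payload itself; a stored count of 0
 * encodes the maximum size of U16_MAX + 1 bytes (handled below).
 */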
static void *erofs_read_metadata(struct super_block *sb, struct page **pagep,
				 erofs_off_t *offset, int *lengthp)
{
	struct page *page = *pagep;
	u8 *buffer, *ptr;
	int len, i, cnt;
	erofs_blk_t blk;

	*offset = round_up(*offset, 4);
	blk = erofs_blknr(*offset);

	if (!page || page->index != blk) {
		if (page) {
			unlock_page(page);
			put_page(page);
		}
		page = erofs_get_meta_page(sb, blk);
		if (IS_ERR(page))
			goto err_nullpage;
	}

	ptr = kmap(page);
	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer) {
		buffer = ERR_PTR(-ENOMEM);
		goto out;
	}
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
		blk = erofs_blknr(*offset);

		if (!page || page->index != blk) {
			if (page) {
				kunmap(page);
				unlock_page(page);
				put_page(page);
			}
			page = erofs_get_meta_page(sb, blk);
			if (IS_ERR(page)) {
				kfree(buffer);
				goto err_nullpage;
			}
			ptr = kmap(page);
		}
		memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
		*offset += cnt;
	}
out:
	kunmap(page);
	*pagep = page;
	return buffer;
err_nullpage:
	*pagep = NULL;
	return page;
}

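/*
 * On-disk compression configurations are stored back to back right
 * after the (extended) superblock, one record per algorithm bit set in
 * available_compr_algs, in ascending algorithm order.
 */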
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi;
	struct page *page;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret;

	sbi = EROFS_SB(sb);
	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);

	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EINVAL;
	}

	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	page = NULL;
	alg = 0;
	ret = 0;

	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &page, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			goto err;
		}

		switch (alg) {
		case Z_EROFS_COMPRESSION_LZ4:
			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
			break;
		default:
			DBG_BUGON(1);
			ret = -EFAULT;
		}
		kfree(data);
		if (ret)
			goto err;
	}
err:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	if (dsb->u1.available_compr_algs) {
		erofs_err(sb, "try to load compressed fs when compression is disabled");
		return -EINVAL;
	}
	return 0;
}
#endif

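/*
 * The superblock lives at EROFS_SUPER_OFFSET within the first block of
 * the device; read it through the bdev page cache and sanity-check the
 * magic, the optional checksum, the block size and incompat features.
 */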
static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct page *page;
	struct erofs_super_block *dsb;
	unsigned int blkszbits;
	void *data;
	int ret;

	page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
	if (IS_ERR(page)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(page);
	}

	sbi = EROFS_SB(sb);

	data = kmap(page);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	blkszbits = dsb->blkszbits;
	/* 9 (512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
	if (blkszbits != LOG_BLOCK_SIZE) {
		erofs_err(sb, "blkszbits %u isn't supported on this platform",
			  blkszbits);
		goto out;
	}

	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > EROFS_BLKSIZ) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	if (erofs_sb_has_compr_cfgs(sbi))
		ret = erofs_load_compr_cfgs(sb, dsb);
	else
		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
out:
	kunmap(page);
	put_page(page);
	return ret;
}

/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->max_sync_decompress_pages = 3;
	ctx->readahead_sync_decompress = false;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(ctx, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(ctx, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always",	EROFS_MOUNT_DAX_ALWAYS},
	{"never",	EROFS_MOUNT_DAX_NEVER},
	{}
};

static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr",	Opt_user_xattr),
	fsparam_flag_no("acl",		Opt_acl),
	fsparam_enum("cache_strategy",	Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax",             Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
	{}
};
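
/*
 * For illustration, the table above accepts mount options such as
 * (device and mount point are hypothetical):
 *
 *   mount -t erofs -o noacl,cache_strategy=readahead,dax=never /dev/sdX /mnt
 *
 * A bare "dax" flag behaves like "dax=always".
 */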

static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(ctx, DAX_ALWAYS);
		clear_opt(ctx, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(ctx, DAX_NEVER);
		clear_opt(ctx, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx __maybe_unused = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(ctx, XATTR_USER);
		else
			clear_opt(ctx, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(ctx, POSIX_ACL);
		else
			clear_opt(ctx, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	default:
		return -ENOPARAM;
	}
	return 0;
}

#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;
static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* 0 - busy */
	struct address_space *const mapping = page->mapping;

	DBG_BUGON(!PageLocked(page));
	DBG_BUGON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(mapping, page);

	return ret;
}

static void erofs_managed_cache_invalidatepage(struct page *page,
					       unsigned int offset,
					       unsigned int length)
{
	const unsigned int stop = length + offset;

	DBG_BUGON(!PageLocked(page));

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > PAGE_SIZE || stop < length);

	if (offset == 0 && stop == PAGE_SIZE)
		while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
	.releasepage = erofs_managed_cache_releasepage,
	.invalidatepage = erofs_managed_cache_invalidatepage,
};

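/*
 * The managed cache is an in-memory pseudo inode whose page cache holds
 * cached compressed pages, so that they can be dropped under memory
 * pressure through the address_space operations above.
 */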
static int erofs_init_managed_cache(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct inode *const inode = new_inode(sb);

	if (!inode)
		return -ENOMEM;

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;

	inode->i_mapping->a_ops = &managed_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping,
			     GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	sbi->managed_cache = inode;
	return 0;
}
#else
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif

static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;

	if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
		erofs_err(sb, "failed to set erofs blksize");
		return -EINVAL;
	}

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (test_opt(ctx, DAX_ALWAYS) &&
	    !bdev_dax_supported(sb->s_bdev, EROFS_BLKSIZ)) {
		errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
		clear_opt(ctx, DAX_ALWAYS);
	}
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;

	sb->s_op = &erofs_sops;
	sb->s_xattr = erofs_xattr_handlers;

	if (test_opt(ctx, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

	sbi->ctx = *ctx;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi), true);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}

static int erofs_fc_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, erofs_fc_fill_super);
}

static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (test_opt(ctx, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->ctx = *ctx;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

static void erofs_fc_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param	= erofs_fc_parse_param,
	.get_tree       = erofs_fc_get_tree,
	.reconfigure    = erofs_fc_reconfigure,
	.free		= erofs_fc_free,
};

static int erofs_init_fs_context(struct fs_context *fc)
{
	fc->fs_private = kzalloc(sizeof(struct erofs_fs_context), GFP_KERNEL);
	if (!fc->fs_private)
		return -ENOMEM;

	/* set default mount options */
	erofs_default_options(fc->fs_private);

	fc->ops = &erofs_context_ops;

	return 0;
}

/*
 * This can be triggered after deactivate_locked_super() is called,
 * which covers both umount and mounts that failed to initialize.
 */
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

	kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;
	fs_put_dax(sbi->dax_dev);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_shrinker_unregister(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
}

static struct file_system_type erofs_fs_type = {
	.owner          = THIS_MODULE,
	.name           = "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb        = erofs_kill_sb,
	.fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");

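/*
 * Module init: on failure, the error labels below unwind in reverse
 * order of initialization (inode cache, shrinker, per-CPU buffers and
 * zip subsystem, then filesystem registration).
 */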
static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
					       sizeof(struct erofs_inode), 0,
					       SLAB_RECLAIM_ACCOUNT,
					       erofs_inode_init_once);
	if (!erofs_inode_cachep) {
		err = -ENOMEM;
		goto icache_err;
	}

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
icache_err:
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);
	z_erofs_exit_zip_subsystem();
	erofs_exit_shrinker();

	/* Ensure all RCU free inodes are safe before cache is destroyed. */
	rcu_barrier();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}

/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = EROFS_BLKSIZ;
	buf->f_blocks = sbi->blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid    = u64_to_fsid(id);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_fs_context *ctx = &sbi->ctx;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(ctx, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(ctx, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (ctx->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(ctx, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(ctx, DAX_NEVER))
		seq_puts(seq, ",dax=never");
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);