xref: /OK3568_Linux_fs/kernel/fs/erofs/internal.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2017-2018 HUAWEI, Inc.
4*4882a593Smuzhiyun  *             https://www.huawei.com/
5*4882a593Smuzhiyun  * Created by Gao Xiang <gaoxiang25@huawei.com>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun #ifndef __EROFS_INTERNAL_H
8*4882a593Smuzhiyun #define __EROFS_INTERNAL_H
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/fs.h>
11*4882a593Smuzhiyun #include <linux/dcache.h>
12*4882a593Smuzhiyun #include <linux/mm.h>
13*4882a593Smuzhiyun #include <linux/pagemap.h>
14*4882a593Smuzhiyun #include <linux/bio.h>
15*4882a593Smuzhiyun #include <linux/buffer_head.h>
16*4882a593Smuzhiyun #include <linux/magic.h>
17*4882a593Smuzhiyun #include <linux/slab.h>
18*4882a593Smuzhiyun #include <linux/vmalloc.h>
19*4882a593Smuzhiyun #include "erofs_fs.h"
20*4882a593Smuzhiyun 
/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

__printf(3, 4) void _erofs_err(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_err(sb, fmt, ...)	\
	_erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
__printf(3, 4) void _erofs_info(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_info(sb, fmt, ...) \
	_erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define erofs_dbg(x, ...)       pr_debug(x "\n", ##__VA_ARGS__)
#define DBG_BUGON               BUG_ON
#else
#define erofs_dbg(x, ...)       ((void)0)
/* still evaluate the condition (for side effects) but never BUG() here */
#define DBG_BUGON(x)            ((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC   EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;	/* on-disk inode number ("nid") */
typedef u64 erofs_off_t;	/* byte offset within the filesystem image */
/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;
48*4882a593Smuzhiyun 
/* mount-time options; embedded in the superblock info as sbi->ctx */
struct erofs_fs_context {
#ifdef CONFIG_EROFS_FS_ZIP
	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;
	/* strategy of sync decompression (false - auto, true - force on) */
	bool readahead_sync_decompress;

	/* threshold for decompression synchronously */
	unsigned int max_sync_decompress_pages;
#endif
	/* EROFS_MOUNT_* flag bits, see set_opt()/test_opt() below */
	unsigned int mount_opt;
};

/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
	/* # of pages needed for EROFS lz4 rolling decompression */
	u16 max_distance_pages;
	/* maximum possible blocks for pclusters in the filesystem */
	u16 max_pclusterblks;
};
69*4882a593Smuzhiyun 
/* per-superblock in-memory information (sb->s_fs_info) */
struct erofs_sb_info {
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* managed XArray arranged in physical block number */
	struct xarray managed_pslots;

	unsigned int shrinker_run_no;
	/* on-disk compression algorithms available in this image */
	u16 available_compr_algs;

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;

	/* filesystem-wide lz4 parameters loaded at mount time */
	struct erofs_sb_lz4_info lz4;
#endif	/* CONFIG_EROFS_FS_ZIP */
	struct dax_device *dax_dev;
	u32 blocks;
	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
#endif

	/* inode slot unit size in bit shift */
	unsigned char islotbits;

	u32 sb_size;			/* total superblock size */
	u32 build_time_nsec;
	u64 build_time;

	/* what we really care is nid, rather than ino.. */
	erofs_nid_t root_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];                    /* 128-bit uuid for volume */
	u8 volume_name[16];             /* volume name */
	u32 feature_compat;
	u32 feature_incompat;

	struct erofs_fs_context ctx;	/* options */
};

#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_DAX_ALWAYS		0x00000040
#define EROFS_MOUNT_DAX_NEVER		0x00000080

#define clear_opt(ctx, option)	((ctx)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(ctx, option)	((ctx)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(ctx, option)	((ctx)->mount_opt & EROFS_MOUNT_##option)

/* values for erofs_fs_context.cache_strategy */
enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};
132*4882a593Smuzhiyun 
#ifdef CONFIG_EROFS_FS_ZIP
/*
 * Sentinel value stored in erofs_workgroup->refcount while the workgroup
 * is "frozen" (exclusively held); cannot collide with any valid count.
 */
#define EROFS_LOCKED_MAGIC     (INT_MIN | 0xE0F510CCL)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};

#if defined(CONFIG_SMP)
/*
 * Try to freeze the workgroup: succeeds only when its refcount still
 * equals @val, atomically replacing it with EROFS_LOCKED_MAGIC.
 * On success, preemption stays disabled until the matching unfreeze.
 */
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
		preempt_enable();
		return false;
	}
	return true;
}

/* restore @orig_val as the refcount, publishing all freezing-period writes */
static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	/*
	 * other observers should notice all modifications
	 * in the freezing period.
	 */
	smp_mb();
	atomic_set(&grp->refcount, orig_val);
	preempt_enable();
}

/* spin until the workgroup is unfrozen, then return its refcount */
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	return atomic_cond_read_relaxed(&grp->refcount,
					VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption. */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	int v = atomic_read(&grp->refcount);

	/* workgroup is never freezed on uniprocessor systems */
	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
	return v;
}
#endif	/* !CONFIG_SMP */
#endif	/* !CONFIG_EROFS_FS_ZIP */
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun /* we strictly follow PAGE_SIZE and no buffer head yet */
205*4882a593Smuzhiyun #define LOG_BLOCK_SIZE		PAGE_SHIFT
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun #undef LOG_SECTORS_PER_BLOCK
208*4882a593Smuzhiyun #define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun #undef SECTORS_PER_BLOCK
211*4882a593Smuzhiyun #define SECTORS_PER_BLOCK	(1 << SECTORS_PER_BLOCK)
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun #define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun #if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
216*4882a593Smuzhiyun #error erofs cannot be used in this platform
217*4882a593Smuzhiyun #endif
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun #define ROOT_NID(sb)		((sb)->root_nid)
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun #define erofs_blknr(addr)       ((addr) / EROFS_BLKSIZ)
222*4882a593Smuzhiyun #define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
223*4882a593Smuzhiyun #define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)
224*4882a593Smuzhiyun 
iloc(struct erofs_sb_info * sbi,erofs_nid_t nid)225*4882a593Smuzhiyun static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun 	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun 
/* generate erofs_sb_has_<name>() feature-bit test helpers */
#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
{ \
	return sbi->feature_##compat & EROFS_FEATURE_##feature; \
}

EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)

/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT	0
#define EROFS_I_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_I_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT	(BITS_PER_LONG - 2)

/* in-memory erofs inode, embedding the VFS inode */
struct erofs_inode {
	erofs_nid_t nid;

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	union {
		/* start block address for non-compressed layouts */
		erofs_blk_t raw_blkaddr;
#ifdef CONFIG_EROFS_FS_ZIP
		/* compression metadata for compressed layouts */
		struct {
			unsigned short z_advise;
			unsigned char  z_algorithmtype[2];
			unsigned char  z_logical_clusterbits;
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

/* cast a VFS inode back to its containing erofs_inode */
#define EROFS_I(ptr)	\
	container_of(ptr, struct erofs_inode, vfs_inode)
278*4882a593Smuzhiyun 
/* number of filesystem blocks covering this inode's data */
static inline unsigned long erofs_inode_datablocks(struct inode *inode)
{
	/* since i_size cannot be changed */
	return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}
284*4882a593Smuzhiyun 
/* extract the @bits-wide field of @value that starts at bit position @bit */
static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
					  unsigned int bits)
{
	const unsigned int mask = (1 << bits) - 1;

	return (value >> bit) & mask;
}
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 
erofs_inode_version(unsigned int value)293*4882a593Smuzhiyun static inline unsigned int erofs_inode_version(unsigned int value)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun 	return erofs_bitrange(value, EROFS_I_VERSION_BIT,
296*4882a593Smuzhiyun 			      EROFS_I_VERSION_BITS);
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun 
erofs_inode_datalayout(unsigned int value)299*4882a593Smuzhiyun static inline unsigned int erofs_inode_datalayout(unsigned int value)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun 	return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
302*4882a593Smuzhiyun 			      EROFS_I_DATALAYOUT_BITS);
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun 
extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
extern const struct address_space_operations z_erofs_aops;

/*
 * Logical to physical block mapping
 *
 * Different with other file systems, it is used for 2 access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
 * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed ondisk block rather than the uncompressed
 * in-memory block for the compressed file.
 *
 * m_pofs equals to m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, it has no difference with
 * the RAW access mode. However, if the inode is compressed,
 * users should pass a valid (m_lblk, m_lofs) pair, and get
 * the needed m_pblk, m_pofs, m_len to get the compressed data
 * and the updated m_lblk, m_lofs which indicates the start
 * of the corresponding uncompressed data in the file.
 */
enum {
	BH_Zipped = BH_PrivateStart,
	BH_FullMapped,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED	(1 << BH_Zipped)
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED	(1 << BH_FullMapped)

struct erofs_map_blocks {
	/* physical (on-disk) and logical (file) byte addresses */
	erofs_off_t m_pa, m_la;
	/* physical and logical extent lengths in bytes */
	u64 m_plen, m_llen;

	/* combination of EROFS_MAP_* flags above */
	unsigned int m_flags;

	/* metadata page used while mapping; see zmap.c/data.c callers */
	struct page *mpage;
};

/* Flags used by erofs_map_blocks_flatmode() */
#define EROFS_GET_BLOCKS_RAW    0x0001
360*4882a593Smuzhiyun 
/* zmap.c */
#ifdef CONFIG_EROFS_FS_ZIP
int z_erofs_fill_inode(struct inode *inode);
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags);
#else
/* !CONFIG_EROFS_FS_ZIP: compressed inodes are unsupported */
static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
static inline int z_erofs_map_blocks_iter(struct inode *inode,
					  struct erofs_map_blocks *map,
					  int flags)
{
	return -EOPNOTSUPP;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */

/* data.c */
extern const struct file_operations erofs_file_fops;
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
380*4882a593Smuzhiyun 
/* inode.c */

/* fold a 64-bit nid into an unsigned long inode-hash key for this arch */
static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
	/* 32-bit: xor the two halves so high bits still contribute */
	return (nid >> 32) ^ (nid & 0xffffffff);
#else
	return nid;
#endif
}
390*4882a593Smuzhiyun 
extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
int erofs_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int query_flags);

/* namei.c */
extern const struct inode_operations erofs_dir_iops;

int erofs_namei(struct inode *dir, struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

/* dir.c */
extern const struct file_operations erofs_dir_fops;
407*4882a593Smuzhiyun 
/*
 * Map @count pages into virtually contiguous kernel address space.
 *
 * vm_map_ram() can fail transiently when the vmalloc area is cluttered
 * with lazily-freed aliases, so flush the aliases and retry before
 * giving up.  Returns the mapped address, or NULL after 3 attempts.
 *
 * Fixed: dropped the unreachable "return NULL;" that followed the
 * infinite loop (the loop only exits via "return p").
 */
static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
	int retried = 0;

	while (1) {
		void *p = vm_map_ram(pages, count, -1);

		/* retry two more times (totally 3 times) */
		if (p || ++retried >= 3)
			return p;
		vm_unmap_aliases();
	}
}
422*4882a593Smuzhiyun 
/* pcpubuf.c */
void *erofs_get_pcpubuf(unsigned int requiredpages);
void erofs_put_pcpubuf(void *ptr);
int erofs_pcpubuf_growsize(unsigned int nrpages);
void erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);

/* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
432*4882a593Smuzhiyun 
#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index);
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page);
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int len);
#else
/* no-op stubs when the compression subsystem is configured out */
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
static inline int z_erofs_load_lz4_config(struct super_block *sb,
				  struct erofs_super_block *dsb,
				  struct z_erofs_lz4_cfgs *lz4, int len)
{
	/* reject images that carry lz4 configuration we cannot honor */
	if (lz4 || dsb->u1.lz4_max_distance) {
		erofs_err(sb, "lz4 algorithm isn't enabled");
		return -EINVAL;
	}
	return 0;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */

#define EFSCORRUPTED    EUCLEAN         /* Filesystem is corrupted */
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun #endif	/* __EROFS_INTERNAL_H */
475*4882a593Smuzhiyun 
476