/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS      3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by pageset lock;
 *
 * A: Field should be accessed / updated atomically for parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};

#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
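
/*
 * As the two macros above suggest, pcl->length packs two things: bit 0
 * (Z_EROFS_PCLUSTER_FULL_LENGTH) marks whether the recorded value already
 * covers the full decompressed extent, and the remaining bits
 * (value >> Z_EROFS_PCLUSTER_LENGTH_BIT) hold the decompressed length
 * known so far (see the "lower limit of decompressed length" field below).
 */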

/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of decompressed length and if full length or not */
	unsigned int length;

	/* I: physical cluster size in pages */
	unsigned short pclusterpages;

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or inplaced pages) */
	struct page *compressed_pages[];
};

#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted io (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted io */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)
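
/*
 * Summarizing the comments above, pcl->next seems to take one of three
 * sentinel values while a chain is built and submitted:
 *   Z_EROFS_PCLUSTER_NIL         - not on any chain yet;
 *   Z_EROFS_PCLUSTER_TAIL        - end of a chain whose io has not been
 *                                  submitted, so it can still be extended;
 *   Z_EROFS_PCLUSTER_TAIL_CLOSED - end of a chain whose io has already
 *                                  been submitted;
 * otherwise it points to the next chained pcluster.
 */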

struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;
		struct work_struct work;
	} u;
};

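/*
 * managed_cache is the per-superblock pseudo inode used to cache compressed
 * pages; a page whose ->mapping points at it is therefore "managed" by that
 * cache rather than owned by the caller.
 */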
#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
					 struct page *page)
{
	return page->mapping == MNGD_MAPPING(sbi);
}

#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): the number of outstanding references that
 * must be dropped before the page can be unlocked;
 * sub-index: 0 for a partial page, >= 1 for the sub-index of a full page
 */
typedef atomic_t z_erofs_onlinepage_t;
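
/*
 * A rough sketch of the page_private() value manipulated by the helpers
 * below, following the *_COUNT_BITS / *_INDEX_SHIFT definitions above:
 *
 *     bits [31..2]   page sub-index (0 for a partial page);
 *     bits [1..0]    reference count: z_erofs_onlinepage_endio() unlocks
 *                    the page once it drops to zero.
 */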

/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

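/*
 * As far as the loop below shows, this helper records @index in the
 * sub-index bits (checking it against any value recorded earlier) and,
 * when @down is true, takes one more reference in the count bits,
 * retrying the cmpxchg() until it wins the race.
 */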
static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}

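/*
 * Presumably these bound the size of the page arrays mapped at once during
 * decompression: the on-stack array is capped at either THREAD_SIZE / 8
 * worth of page pointers or 96 entries, whichever is smaller, while the
 * global fallback covers up to 2048 pages.
 */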
#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048

#endif