1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Created by Gao Xiang <gaoxiang25@huawei.com>
6 */
7 #ifndef __EROFS_FS_ZDATA_H
8 #define __EROFS_FS_ZDATA_H
9
10 #include "internal.h"
11 #include "zpvec.h"
12
13 #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
14 #define Z_EROFS_NR_INLINE_PAGEVECS 3
15
16 /*
17 * Structure fields follow one of the following exclusion rules.
18 *
19 * I: Modifiable by initialization/destruction paths and read-only
20 * for everyone else;
21 *
22 * L: Field should be protected by pageset lock;
23 *
24 * A: Field should be accessed / updated in atomic for parallelized code.
25 */
/*
 * Decompression state for one logical cluster: the page offset at which
 * decompressed data starts, page counts, and an inline pagevec holding
 * the collected pages.  Embedded in struct z_erofs_pcluster as
 * primary_collection (accessed via z_erofs_primarycollection()).
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/*
		 * I: can be used to free the pcluster by RCU; overlays
		 * pagevec[] since the two are never needed at the same time.
		 */
		struct rcu_head rcu;
	};
};
46
47 #define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
48 #define Z_EROFS_PCLUSTER_LENGTH_BIT 1
49
/*
 * leave a dedicated type here in case another tagged pointer
 * is introduced later.
 */
54 typedef void *z_erofs_next_pcluster_t;
55
/*
 * Runtime object for one physical (compressed) cluster.  It embeds an
 * erofs_workgroup so it can be looked up/reclaimed through the managed
 * workgroup infrastructure, and ends with a flexible array of compressed
 * pages whose length is pclusterpages.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/*
	 * A: lower limit of decompressed length and if full length or not
	 * (encoded with Z_EROFS_PCLUSTER_FULL_LENGTH / _LENGTH_BIT above)
	 */
	unsigned int length;

	/* I: physical cluster size in pages */
	unsigned short pclusterpages;

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or inplaced pages) */
	struct page *compressed_pages[];
};
75
76 #define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
77
78 /* let's avoid the valid 32-bit kernel addresses */
79
/* the chained workgroup hasn't submitted io (still open) */
81 #define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
82 /* the chained workgroup has already submitted io */
83 #define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
84
85 #define Z_EROFS_PCLUSTER_NIL (NULL)
86
/*
 * A queue of chained pclusters submitted for I/O and decompression.
 * head is the first pcluster of the chain (terminated by the TAIL/NIL
 * sentinels below).  pending_bios presumably counts in-flight bios for
 * this queue -- TODO confirm against zdata.c; completion is signalled
 * either synchronously (u.done) or deferred to a workqueue (u.work).
 */
struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;
		struct work_struct work;
	} u;
};
97
98 #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
erofs_page_is_managed(const struct erofs_sb_info * sbi,struct page * page)99 static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
100 struct page *page)
101 {
102 return page->mapping == MNGD_MAPPING(sbi);
103 }
104
105 #define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
106 #define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
107 #define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
108
109 /*
110 * waiters (aka. ongoing_packs): # to unlock the page
111 * sub-index: 0 - for partial page, >= 1 full page sub-index
112 */
113 typedef atomic_t z_erofs_onlinepage_t;
114
/*
 * Type punning helper: view the unsigned long stashed in page->private
 * as the atomic online-page counter (z_erofs_onlinepage_t).
 */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};
120
z_erofs_onlinepage_index(struct page * page)121 static inline unsigned int z_erofs_onlinepage_index(struct page *page)
122 {
123 union z_erofs_onlinepage_converter u;
124
125 DBG_BUGON(!PagePrivate(page));
126 u.v = &page_private(page);
127
128 return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
129 }
130
/*
 * Turn @page into an "online" page under decompression: stash an atomic
 * counter in page->private, starting at 1 so the page cannot be unlocked
 * before initialization finishes, then publish it via PG_private.
 */
static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	/* order the private value before PagePrivate becomes observable */
	smp_wmb();
	SetPagePrivate(page);
}
143
/*
 * Record the sub-index of @page and, if @down, add one waiter to its
 * online counter in a single lock-free update (cmpxchg retry loop).
 *
 * If an index is already recorded: a zero @index leaves the word
 * untouched (the @down increment is skipped too), and a non-matching
 * non-zero @index is a bug (DBG_BUGON).
 */
static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	/* new word = index bits | (old waiter count, +1 when down) */
	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}
165
/*
 * Drop one waiter from @page's online counter.  When the count reaches
 * zero, tear down the private state, mark the page uptodate (unless an
 * error was flagged on it) and unlock it for the reader.
 */
static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		/* last waiter: release the private stash and unlock */
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
184
185 #define Z_EROFS_VMAP_ONSTACK_PAGES \
186 min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
187 #define Z_EROFS_VMAP_GLOBAL_PAGES 2048
188
189 #endif
190
191