xref: /OK3568_Linux_fs/kernel/fs/erofs/decompressor.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX 65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

struct z_erofs_decompressor {
	/*
	 * If destpages contain sparse (missing) pages, fill them with bounce
	 * pages. It also checks whether the destpages are physically
	 * contiguous in memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

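/*
 * Parse the LZ4 on-disk configuration: record the maximum physical cluster
 * size (in blocks) and the maximum match distance, then make sure the
 * per-CPU buffers can hold the largest pcluster. Falls back to the legacy
 * lz4_max_distance superblock field when no per-algorithm config exists.
 */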
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

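/*
 * Fill the holes in rq->out[] with bounce pages so that the whole output
 * window can be mapped. Bounce pages that have fallen out of the LZ4 match
 * window are reused before new ones are allocated. Returns 1 if the output
 * pages happen to be physically contiguous (no vmap needed), 0 otherwise.
 */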
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

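/*
 * Map the compressed input for decompression. Three cases are possible:
 * a single input page is used as-is (*maptype = 0), multiple input pages
 * are mapped with vm_map_ram() (*maptype = 1), or the compressed data is
 * copied into a per-CPU buffer because in-place decompression could
 * overwrite it (*maptype = 2).
 */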
static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/*
	 * Otherwise, copy the compressed data, which may be overwritten
	 * in place, into the per-CPU buffer.
	 */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

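/*
 * LZ4 decompression backend: skip the leading zero padding (if the 0padding
 * feature is enabled), map or copy the compressed input via
 * z_erofs_handle_inplace_io(), then call the in-kernel LZ4 decompressor
 * and verify that the expected number of bytes was produced.
 */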
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* in-place decompression is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
					support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* the legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

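/* indexed by the on-disk algorithm id (Z_EROFS_COMPRESSION_*) */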
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};

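/*
 * Copy decompressed data from the per-CPU buffer back into the (possibly
 * sparse) destination page array; NULL entries in *out are skipped.
 */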
static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

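/*
 * Generic decompression path: pick a destination mapping strategy (direct
 * kmap of a single output page, per-CPU buffer plus copy-back for small
 * outputs, page_address() for physically contiguous lowmem pages, or
 * vm_map_ram() otherwise) and then invoke the algorithm's decompress hook.
 */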
static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* two optimized fast paths, only for non-bigpcluster cases for now */
	if (rq->inputsize <= PAGE_SIZE) {
		if (nrpages_out == 1 && !rq->inplace_io) {
			DBG_BUGON(!*rq->out);
			dst = kmap_atomic(*rq->out);
			dst_maptype = 0;
			goto dstmap_out;
		}

		/*
		 * For small output sizes (especially much less than
		 * PAGE_SIZE), it is preferable to memcpy the decompressed
		 * data rather than the compressed data.
		 */
		if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
			dst = erofs_get_pcpubuf(1);
			if (IS_ERR(dst))
				return PTR_ERR(dst);

			rq->inplace_io = false;
			ret = alg->decompress(rq, dst);
			if (!ret)
				copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
						  rq->outputsize);

			erofs_put_pcpubuf(dst);
			return ret;
		}
	}

	/* general decoding path which can be used for all cases */
	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0)
		return ret;
	if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

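/*
 * "shifted" (uncompressed) transform: the plain data only needs to be
 * copied from the single input page into at most two output pages at the
 * requested output offset.
 */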
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

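/* entry point: dispatch to the shifted transform or the generic path */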
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}