1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2019 HUAWEI, Inc.
4*4882a593Smuzhiyun * https://www.huawei.com/
5*4882a593Smuzhiyun * Created by Gao Xiang <gaoxiang25@huawei.com>
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun #include "compress.h"
8*4882a593Smuzhiyun #include <linux/module.h>
9*4882a593Smuzhiyun #include <linux/lz4.h>
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #ifndef LZ4_DISTANCE_MAX /* history window size */
12*4882a593Smuzhiyun #define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
13*4882a593Smuzhiyun #endif
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
16*4882a593Smuzhiyun #ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
17*4882a593Smuzhiyun #define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
18*4882a593Smuzhiyun #endif
19*4882a593Smuzhiyun
/*
 * Per-algorithm decompressor operations; instances live in the
 * decompressors[] table, indexed by the compression algorithm id.
 */
struct z_erofs_decompressor {
	/*
	 * If destpages have sparse (missing) pages, fill them with bounce
	 * pages.  It also checks whether destpages indicate continuous
	 * physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	/* decompress rq's input into the (pre-mapped) output buffer @out */
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	/* human-readable algorithm name for diagnostics */
	char *name;
};
30*4882a593Smuzhiyun
z_erofs_load_lz4_config(struct super_block * sb,struct erofs_super_block * dsb,struct z_erofs_lz4_cfgs * lz4,int size)31*4882a593Smuzhiyun int z_erofs_load_lz4_config(struct super_block *sb,
32*4882a593Smuzhiyun struct erofs_super_block *dsb,
33*4882a593Smuzhiyun struct z_erofs_lz4_cfgs *lz4, int size)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun struct erofs_sb_info *sbi = EROFS_SB(sb);
36*4882a593Smuzhiyun u16 distance;
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun if (lz4) {
39*4882a593Smuzhiyun if (size < sizeof(struct z_erofs_lz4_cfgs)) {
40*4882a593Smuzhiyun erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
41*4882a593Smuzhiyun return -EINVAL;
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun distance = le16_to_cpu(lz4->max_distance);
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
46*4882a593Smuzhiyun if (!sbi->lz4.max_pclusterblks) {
47*4882a593Smuzhiyun sbi->lz4.max_pclusterblks = 1; /* reserved case */
48*4882a593Smuzhiyun } else if (sbi->lz4.max_pclusterblks >
49*4882a593Smuzhiyun Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
50*4882a593Smuzhiyun erofs_err(sb, "too large lz4 pclusterblks %u",
51*4882a593Smuzhiyun sbi->lz4.max_pclusterblks);
52*4882a593Smuzhiyun return -EINVAL;
53*4882a593Smuzhiyun } else if (sbi->lz4.max_pclusterblks >= 2) {
54*4882a593Smuzhiyun erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun } else {
57*4882a593Smuzhiyun distance = le16_to_cpu(dsb->u1.lz4_max_distance);
58*4882a593Smuzhiyun sbi->lz4.max_pclusterblks = 1;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun sbi->lz4.max_distance_pages = distance ?
62*4882a593Smuzhiyun DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
63*4882a593Smuzhiyun LZ4_MAX_DISTANCE_PAGES;
64*4882a593Smuzhiyun return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun
/*
 * Fill holes in rq->out[] with bounce pages so LZ4 can write every output
 * byte, while tracking whether the populated pages are physically
 * contiguous.
 *
 * A bounce page may be reused once it falls outside the LZ4 match window
 * (lz4.max_distance_pages); the 'bounced' bitmap records which slots of
 * the current window hold bounce pages, and 'availables' stacks pages
 * that have rotated out of the window and are safe to recycle.
 *
 * Returns 1 if all output pages form one contiguous kernel mapping (the
 * caller can then decompress directly into page_address(rq->out[0])),
 * 0 otherwise.
 */
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	/* i walks all output slots; j is i modulo the distance window */
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			/* the page one full window back is now recyclable */
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				/*
				 * kaddr stays non-NULL only while each page
				 * directly follows the previous one in the
				 * kernel linear mapping.
				 */
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			/* highmem or discontiguous: give up on direct output */
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			/* recycle a bounce page that left the match window */
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}
126*4882a593Smuzhiyun
/*
 * Map the compressed input so the LZ4 decoder can read it linearly.
 *
 * @inpage:      atomic kmap of the first input page (*rq->in), owned by
 *               the caller until this function consumes or unmaps it
 * @inputmargin: in/out byte offset of real data within the first page;
 *               reset to 0 when data is copied out of the pages
 * @maptype:     out; tells the caller how to tear the mapping down:
 *               0 = single kmap_atomic page (kunmap_atomic),
 *               1 = vmap of rq->in (vm_unmap_ram),
 *               2 = per-CPU copy buffer (erofs_put_pcpubuf)
 *
 * Returns the virtual address of the mapped/copied input, or ERR_PTR().
 */
static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		/*
		 * In-place decompression is only safe with 0padding, full
		 * decoding, and enough tail margin so output cannot catch
		 * up with unread input; otherwise copy the input away.
		 */
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		/*
		 * An input page reused too early in rq->out[] would be
		 * overwritten before it is consumed — must copy then.
		 */
		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		/* single page: keep using the caller's atomic kmap */
		*maptype = 0;
		return inpage;
	}
	/* multiple pages need a (sleeping) virtually-contiguous mapping */
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	/* gather the input pages into the per-CPU buffer, page by page */
	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		/* only the first page carries a leading margin */
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
196*4882a593Smuzhiyun
/*
 * Decompress one LZ4 pcluster described by @rq into @out.
 *
 * @out must already cover rq->outputsize bytes of mapped destination
 * memory (set up by the caller via prepare_destpages/vmap/kmap).
 *
 * Returns 0 on success or a negative errno; on short/failed decodes the
 * remainder of @out is zeroed and -EIO is returned.
 */
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		/*
		 * 0padding puts leading zeroes before the compressed
		 * stream; skip them to find where real data begins
		 * (stop at the end of the first page).
		 */
		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		/* an all-zero input cannot be a valid compressed stream */
		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	/* headpage ownership passes here: consumed or unmapped inside */
	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
					support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		/* zero the undecoded tail so stale data never leaks out */
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	/* tear down the input mapping according to how it was set up */
	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}
263*4882a593Smuzhiyun
/*
 * Algorithm dispatch table, indexed by the on-disk compression mode
 * (rq->alg).  "shifted" has no callbacks since it is handled by the
 * dedicated z_erofs_shifted_transform() path.
 */
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};
274*4882a593Smuzhiyun
copy_from_pcpubuf(struct page ** out,const char * dst,unsigned short pageofs_out,unsigned int outputsize)275*4882a593Smuzhiyun static void copy_from_pcpubuf(struct page **out, const char *dst,
276*4882a593Smuzhiyun unsigned short pageofs_out,
277*4882a593Smuzhiyun unsigned int outputsize)
278*4882a593Smuzhiyun {
279*4882a593Smuzhiyun const char *end = dst + outputsize;
280*4882a593Smuzhiyun const unsigned int righthalf = PAGE_SIZE - pageofs_out;
281*4882a593Smuzhiyun const char *cur = dst - pageofs_out;
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun while (cur < end) {
284*4882a593Smuzhiyun struct page *const page = *out++;
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun if (page) {
287*4882a593Smuzhiyun char *buf = kmap_atomic(page);
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun if (cur >= dst) {
290*4882a593Smuzhiyun memcpy(buf, cur, min_t(uint, PAGE_SIZE,
291*4882a593Smuzhiyun end - cur));
292*4882a593Smuzhiyun } else {
293*4882a593Smuzhiyun memcpy(buf + pageofs_out, cur + pageofs_out,
294*4882a593Smuzhiyun min_t(uint, righthalf, end - cur));
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun kunmap_atomic(buf);
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun cur += PAGE_SIZE;
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
/*
 * Drive a full decompression request: pick an output mapping strategy,
 * run the algorithm's decompress callback, then tear the mapping down.
 *
 * dst_maptype: 0 = kmap_atomic of the single output page,
 *              1 = direct page_address() of contiguous pages,
 *              2 = vmap of rq->out.
 *
 * Returns 0 on success or a negative errno.
 */
static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* two optimized fast paths only for non bigpcluster cases yet */
	if (rq->inputsize <= PAGE_SIZE) {
		if (nrpages_out == 1 && !rq->inplace_io) {
			/* single output page: a plain atomic kmap suffices */
			DBG_BUGON(!*rq->out);
			dst = kmap_atomic(*rq->out);
			dst_maptype = 0;
			goto dstmap_out;
		}

		/*
		 * For the case of small output size (especially much less
		 * than PAGE_SIZE), memcpy the decompressed data rather than
		 * compressed data is preferred.
		 */
		if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
			dst = erofs_get_pcpubuf(1);
			if (IS_ERR(dst))
				return PTR_ERR(dst);

			/* output goes to the pcpu buffer, not the in pages */
			rq->inplace_io = false;
			ret = alg->decompress(rq, dst);
			if (!ret)
				copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
						  rq->outputsize);

			erofs_put_pcpubuf(dst);
			return ret;
		}
	}

	/* general decoding path which can be used for all cases */
	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0)
		return ret;
	if (ret) {
		/* pages are physically contiguous: no vmap needed */
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}
366*4882a593Smuzhiyun
/*
 * Handle the "shifted" (uncompressed) pcluster format: the data is
 * stored verbatim in a single input page and only needs to be moved to
 * the output offset.  Output may span at most two pages and may alias
 * the input page (in-place).
 *
 * Returns 0 on success, -EIO on an impossible (>2 page) layout.
 */
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	/* fully in-place single page: the data is already where it belongs */
	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			/* overlapping in-place tail: memmove within src */
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
405*4882a593Smuzhiyun
z_erofs_decompress(struct z_erofs_decompress_req * rq,struct list_head * pagepool)406*4882a593Smuzhiyun int z_erofs_decompress(struct z_erofs_decompress_req *rq,
407*4882a593Smuzhiyun struct list_head *pagepool)
408*4882a593Smuzhiyun {
409*4882a593Smuzhiyun if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
410*4882a593Smuzhiyun return z_erofs_shifted_transform(rq, pagepool);
411*4882a593Smuzhiyun return z_erofs_decompress_generic(rq, pagepool);
412*4882a593Smuzhiyun }
413*4882a593Smuzhiyun
414