/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_FS_COMPRESS_H
#define __EROFS_FS_COMPRESS_H

#include "internal.h"

/*
 * Runtime-only algorithm IDs, appended after the on-disk IDs
 * (which end at Z_EROFS_COMPRESSION_MAX) so that a decompress
 * request can describe every cluster kind with a single `alg` value.
 */
enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};

/* Everything a backend needs to decompress one pcluster. */
struct z_erofs_decompress_req {
	struct super_block *sb;
	/* compressed input pages and decompressed output pages */
	struct page **in, **out;

	/* byte offset of the decompressed data within the first out page */
	unsigned short pageofs_out;
	/* total compressed / decompressed byte counts */
	unsigned int inputsize, outputsize;

	/* indicate the algorithm will be used for decompression */
	unsigned int alg;
	/* in-place I/O: some in pages double as out pages;
	 * partial_decoding: only part of the output is needed */
	bool inplace_io, partial_decoding;
};

/*
 * some special page->private (unsigned long, see below)
 * Both markers keep the low 2 bits clear, which distinguishes them
 * from online pages in the table below.
 */
#define Z_EROFS_SHORTLIVED_PAGE		(-1UL << 2)
#define Z_EROFS_PREALLOCATED_PAGE	(-2UL << 2)

/*
 * For all pages in a pcluster, page->private should be one of
 * Type                         Last 2bits      page->private
 * short-lived page             00              Z_EROFS_SHORTLIVED_PAGE
 * preallocated page (tryalloc) 00              Z_EROFS_PREALLOCATED_PAGE
 * cached/managed page          00              pointer to z_erofs_pcluster
 * online page (file-backed,    01/10/11        sub-index << 2 | count
 *              some pages can be used for inplace I/O)
 *
 * page->mapping should be one of
 * Type                 page->mapping
 * short-lived page     NULL
 * preallocated page    NULL
 * cached/managed page  non-NULL or NULL (invalidated/truncated page)
 * online page          non-NULL
 *
 * For all managed pages, PG_private should be set with 1 extra refcount,
 * which is used for page reclaim / migration.
 */

53*4882a593Smuzhiyun /*
54*4882a593Smuzhiyun  * short-lived pages are pages directly from buddy system with specific
55*4882a593Smuzhiyun  * page->private (no need to set PagePrivate since these are non-LRU /
56*4882a593Smuzhiyun  * non-movable pages and bypass reclaim / migration code).
57*4882a593Smuzhiyun  */
z_erofs_is_shortlived_page(struct page * page)58*4882a593Smuzhiyun static inline bool z_erofs_is_shortlived_page(struct page *page)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	if (page->private != Z_EROFS_SHORTLIVED_PAGE)
61*4882a593Smuzhiyun 		return false;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	DBG_BUGON(page->mapping);
64*4882a593Smuzhiyun 	return true;
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun 
z_erofs_put_shortlivedpage(struct list_head * pagepool,struct page * page)67*4882a593Smuzhiyun static inline bool z_erofs_put_shortlivedpage(struct list_head *pagepool,
68*4882a593Smuzhiyun 					      struct page *page)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	if (!z_erofs_is_shortlived_page(page))
71*4882a593Smuzhiyun 		return false;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	/* short-lived pages should not be used by others at the same time */
74*4882a593Smuzhiyun 	if (page_ref_count(page) > 1) {
75*4882a593Smuzhiyun 		put_page(page);
76*4882a593Smuzhiyun 	} else {
77*4882a593Smuzhiyun 		/* follow the pcluster rule above. */
78*4882a593Smuzhiyun 		set_page_private(page, 0);
79*4882a593Smuzhiyun 		list_add(&page->lru, pagepool);
80*4882a593Smuzhiyun 	}
81*4882a593Smuzhiyun 	return true;
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun 
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool);

#endif