xref: /OK3568_Linux_fs/kernel/drivers/block/zram/zcomp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2014 Sergey Senozhatsky.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/kernel.h>
7*4882a593Smuzhiyun #include <linux/string.h>
8*4882a593Smuzhiyun #include <linux/err.h>
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/wait.h>
11*4882a593Smuzhiyun #include <linux/sched.h>
12*4882a593Smuzhiyun #include <linux/cpu.h>
13*4882a593Smuzhiyun #include <linux/crypto.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include "zcomp.h"
16*4882a593Smuzhiyun 
/*
 * Compression backends zram knows by name.  Only algorithms whose crypto
 * drivers are enabled in the kernel config are listed; anything else known
 * to the crypto API is still usable but shown separately by
 * zcomp_available_show().
 */
static const char * const backends[] = {
	"lzo",
	"lzo-rle",
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
	"lz4",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
	"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
	"842",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
	"zstd",
#endif
};
33*4882a593Smuzhiyun 
zcomp_strm_free(struct zcomp_strm * zstrm)34*4882a593Smuzhiyun static void zcomp_strm_free(struct zcomp_strm *zstrm)
35*4882a593Smuzhiyun {
36*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(zstrm->tfm))
37*4882a593Smuzhiyun 		crypto_free_comp(zstrm->tfm);
38*4882a593Smuzhiyun 	free_pages((unsigned long)zstrm->buffer, 1);
39*4882a593Smuzhiyun 	zstrm->tfm = NULL;
40*4882a593Smuzhiyun 	zstrm->buffer = NULL;
41*4882a593Smuzhiyun }
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun /*
44*4882a593Smuzhiyun  * Initialize zcomp_strm structure with ->tfm initialized by backend, and
45*4882a593Smuzhiyun  * ->buffer. Return a negative value on error.
46*4882a593Smuzhiyun  */
zcomp_strm_init(struct zcomp_strm * zstrm,struct zcomp * comp)47*4882a593Smuzhiyun static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun 	zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
50*4882a593Smuzhiyun 	/*
51*4882a593Smuzhiyun 	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
52*4882a593Smuzhiyun 	 * case when compressed size is larger than the original one
53*4882a593Smuzhiyun 	 */
54*4882a593Smuzhiyun 	zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
55*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
56*4882a593Smuzhiyun 		zcomp_strm_free(zstrm);
57*4882a593Smuzhiyun 		return -ENOMEM;
58*4882a593Smuzhiyun 	}
59*4882a593Smuzhiyun 	return 0;
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun 
/* Return true if the crypto API provides a compressor named @comp. */
bool zcomp_available_algorithm(const char *comp)
{
	/*
	 * Crypto does not ignore a trailing new line symbol,
	 * so make sure you don't supply a string containing
	 * one.
	 * This also means that we permit zcomp initialisation
	 * with any compressing algorithm known to crypto api.
	 */
	return crypto_has_comp(comp, 0, 0) == 1;
}
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun /* show available compressors */
zcomp_available_show(const char * comp,char * buf)75*4882a593Smuzhiyun ssize_t zcomp_available_show(const char *comp, char *buf)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun 	bool known_algorithm = false;
78*4882a593Smuzhiyun 	ssize_t sz = 0;
79*4882a593Smuzhiyun 	int i;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(backends); i++) {
82*4882a593Smuzhiyun 		if (!strcmp(comp, backends[i])) {
83*4882a593Smuzhiyun 			known_algorithm = true;
84*4882a593Smuzhiyun 			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
85*4882a593Smuzhiyun 					"[%s] ", backends[i]);
86*4882a593Smuzhiyun 		} else {
87*4882a593Smuzhiyun 			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
88*4882a593Smuzhiyun 					"%s ", backends[i]);
89*4882a593Smuzhiyun 		}
90*4882a593Smuzhiyun 	}
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	/*
93*4882a593Smuzhiyun 	 * Out-of-tree module known to crypto api or a missing
94*4882a593Smuzhiyun 	 * entry in `backends'.
95*4882a593Smuzhiyun 	 */
96*4882a593Smuzhiyun 	if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
97*4882a593Smuzhiyun 		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
98*4882a593Smuzhiyun 				"[%s] ", comp);
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
101*4882a593Smuzhiyun 	return sz;
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
/*
 * Pin the caller to the current CPU (via local_lock) and return that CPU's
 * compression stream.  Must be paired with zcomp_stream_put().
 */
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}
109*4882a593Smuzhiyun 
/* Release the per-CPU stream taken by zcomp_stream_get(). */
void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}
114*4882a593Smuzhiyun 
/*
 * Compress one PAGE_SIZE source page into zstrm->buffer.  On return
 * *dst_len holds the compressed length; the crypto backend's return
 * value (0 or -ERRNO) is passed through.
 */
int zcomp_compress(struct zcomp_strm *zstrm,
		const void *src, unsigned int *dst_len)
{
	/*
	 * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
	 * because sometimes we can endup having a bigger compressed data
	 * due to various reasons: for example compression algorithms tend
	 * to add some padding to the compressed buffer. Speaking of padding,
	 * comp algorithm `842' pads the compressed length to multiple of 8
	 * and returns -ENOSP when the dst memory is not big enough, which
	 * is not something that ZRAM wants to see. We can handle the
	 * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
	 * receive -ERRNO from the compressing backend we can't help it
	 * anymore. To make `842' happy we need to tell the exact size of
	 * the dst buffer, zram_drv will take care of the fact that
	 * compressed buffer is too big.
	 */
	*dst_len = PAGE_SIZE * 2;

	return crypto_comp_compress(zstrm->tfm,
			src, PAGE_SIZE,
			zstrm->buffer, dst_len);
}
138*4882a593Smuzhiyun 
zcomp_decompress(struct zcomp_strm * zstrm,const void * src,unsigned int src_len,void * dst)139*4882a593Smuzhiyun int zcomp_decompress(struct zcomp_strm *zstrm,
140*4882a593Smuzhiyun 		const void *src, unsigned int src_len, void *dst)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	unsigned int dst_len = PAGE_SIZE;
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	return crypto_comp_decompress(zstrm->tfm,
145*4882a593Smuzhiyun 			src, src_len,
146*4882a593Smuzhiyun 			dst, &dst_len);
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun 
zcomp_cpu_up_prepare(unsigned int cpu,struct hlist_node * node)149*4882a593Smuzhiyun int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
152*4882a593Smuzhiyun 	struct zcomp_strm *zstrm;
153*4882a593Smuzhiyun 	int ret;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	zstrm = per_cpu_ptr(comp->stream, cpu);
156*4882a593Smuzhiyun 	local_lock_init(&zstrm->lock);
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	ret = zcomp_strm_init(zstrm, comp);
159*4882a593Smuzhiyun 	if (ret)
160*4882a593Smuzhiyun 		pr_err("Can't allocate a compression stream\n");
161*4882a593Smuzhiyun 	return ret;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun 
zcomp_cpu_dead(unsigned int cpu,struct hlist_node * node)164*4882a593Smuzhiyun int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun 	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
167*4882a593Smuzhiyun 	struct zcomp_strm *zstrm;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	zstrm = per_cpu_ptr(comp->stream, cpu);
170*4882a593Smuzhiyun 	zcomp_strm_free(zstrm);
171*4882a593Smuzhiyun 	return 0;
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun 
zcomp_init(struct zcomp * comp)174*4882a593Smuzhiyun static int zcomp_init(struct zcomp *comp)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun 	int ret;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	comp->stream = alloc_percpu(struct zcomp_strm);
179*4882a593Smuzhiyun 	if (!comp->stream)
180*4882a593Smuzhiyun 		return -ENOMEM;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
183*4882a593Smuzhiyun 	if (ret < 0)
184*4882a593Smuzhiyun 		goto cleanup;
185*4882a593Smuzhiyun 	return 0;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun cleanup:
188*4882a593Smuzhiyun 	free_percpu(comp->stream);
189*4882a593Smuzhiyun 	return ret;
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun 
/*
 * Tear down a zcomp created by zcomp_create(): unregistering the hotplug
 * instance runs zcomp_cpu_dead() on every CPU (freeing each stream's tfm
 * and buffer) before the percpu array and the zcomp itself are freed.
 */
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	free_percpu(comp->stream);
	kfree(comp);
}
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun /*
200*4882a593Smuzhiyun  * search available compressors for requested algorithm.
201*4882a593Smuzhiyun  * allocate new zcomp and initialize it. return compressing
202*4882a593Smuzhiyun  * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
203*4882a593Smuzhiyun  * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
204*4882a593Smuzhiyun  * case of allocation error, or any other error potentially
205*4882a593Smuzhiyun  * returned by zcomp_init().
206*4882a593Smuzhiyun  */
zcomp_create(const char * compress)207*4882a593Smuzhiyun struct zcomp *zcomp_create(const char *compress)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun 	struct zcomp *comp;
210*4882a593Smuzhiyun 	int error;
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	/*
213*4882a593Smuzhiyun 	 * Crypto API will execute /sbin/modprobe if the compression module
214*4882a593Smuzhiyun 	 * is not loaded yet. We must do it here, otherwise we are about to
215*4882a593Smuzhiyun 	 * call /sbin/modprobe under CPU hot-plug lock.
216*4882a593Smuzhiyun 	 */
217*4882a593Smuzhiyun 	if (!zcomp_available_algorithm(compress))
218*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
221*4882a593Smuzhiyun 	if (!comp)
222*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	comp->name = compress;
225*4882a593Smuzhiyun 	error = zcomp_init(comp);
226*4882a593Smuzhiyun 	if (error) {
227*4882a593Smuzhiyun 		kfree(comp);
228*4882a593Smuzhiyun 		return ERR_PTR(error);
229*4882a593Smuzhiyun 	}
230*4882a593Smuzhiyun 	return comp;
231*4882a593Smuzhiyun }
232