xref: /OK3568_Linux_fs/kernel/drivers/base/regmap/regcache-lzo.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - LZO caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/lzo.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_lzo_exit(struct regmap *map);

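/*
 * Per-block compression context: scratch memory for the LZO compressor,
 * the source/destination buffers and lengths for the current operation,
 * the original (uncompressed) size of the block, and a pointer to the
 * bitmap (shared by all blocks) of registers that still need syncing.
 */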
struct regcache_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};

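/* The raw register cache is split into a fixed number of compressed blocks. */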
#define LZO_BLOCK_NUM 8
static int regcache_lzo_block_count(struct regmap *map)
{
	return LZO_BLOCK_NUM;
}

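/* Allocate the scratch memory the LZO1X compressor needs for one block. */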
static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}

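/*
 * Compress src_len bytes from src into dst.  Fails if the compressor
 * reports an error or the output does not fit in dst_len; on success
 * dst_len is updated to the compressed size.
 */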
static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}

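/*
 * Decompress src into dst, expecting exactly dst_len bytes of output;
 * anything else is treated as corruption.
 */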
static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}

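/*
 * Allocate a worst-case sized destination buffer and compress the
 * block's source data into it.
 */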
static int regcache_lzo_compress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

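/*
 * Allocate a buffer of the block's original size and decompress the
 * block's source data into it.
 */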
static int regcache_lzo_decompress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

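/*
 * Map a register address to the index of the compressed block holding
 * it: the register's byte offset in the raw cache divided by the
 * uncompressed block size.
 */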
static inline int regcache_lzo_get_blkindex(struct regmap *map,
					    unsigned int reg)
{
	return ((reg / map->reg_stride) * map->cache_word_size) /
		DIV_ROUND_UP(map->cache_size_raw,
			     regcache_lzo_block_count(map));
}

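/* Position of a register, in cache words, within its decompressed block. */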
static inline int regcache_lzo_get_blkpos(struct regmap *map,
					  unsigned int reg)
{
	return (reg / map->reg_stride) %
		    (DIV_ROUND_UP(map->cache_size_raw,
				  regcache_lzo_block_count(map)) /
		     map->cache_word_size);
}

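/* Size in bytes of one uncompressed block of the raw register cache. */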
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}

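/*
 * Allocate the block contexts and the shared sync bitmap, then compress
 * the raw register defaults into the blocks.
 */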
static int regcache_lzo_init(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	int ret, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;

	blkcount = regcache_lzo_block_count(map);
	map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
			     GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;
	lzo_blocks = map->cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware.  Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = map->num_reg_defaults_raw;
	sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; i++) {
		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			bitmap_free(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = regcache_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = regcache_lzo_get_blksize(map);
	p = map->reg_defaults_raw;
	end = map->reg_defaults_raw + map->cache_size_raw;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; i++, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = regcache_lzo_compress_cache_block(map,
						       lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	return 0;
err:
	regcache_lzo_exit(map);
	return ret;
}

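/* Free the shared sync bitmap, every block's buffers and the block array. */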
static int regcache_lzo_exit(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = map->cache;
	if (!lzo_blocks)
		return 0;

	blkcount = regcache_lzo_block_count(map);
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks.  Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		bitmap_free(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; i++) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}
	kfree(lzo_blocks);
	map->cache = NULL;
	return 0;
}

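/*
 * Read a register value from the cache: decompress the block that holds
 * it into a temporary buffer, fetch the value, then restore the block's
 * compressed buffer.
 */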
static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(map, lzo_block->dst, blkpos);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}

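/*
 * Write a register value to the cache: decompress the block that holds
 * it, update the value, recompress the block and mark the register as
 * needing to be synced to the hardware.
 */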
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}

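/*
 * Write back to the hardware every register in [min, max] whose bit is
 * set in the sync bitmap, skipping registers that still hold their
 * hardware default value.
 */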
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
			     unsigned int max)
{
	struct regcache_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = map->cache;
	i = min;
	for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
			      lzo_blocks[0]->sync_bmp_nbits) {
		if (i > max)
			continue;

		ret = regcache_read(map, i, &val);
		if (ret)
			return ret;

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, i);
		if (ret > 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, i, val);
		map->cache_bypass = false;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			i, val);
	}

	return 0;
}

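/* Cache operations registered with the regmap core for REGCACHE_COMPRESSED. */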
struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};
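
/*
 * Illustrative sketch, not part of this file: a driver would typically
 * select this cache implementation through its regmap_config by setting
 * .cache_type = REGCACHE_COMPRESSED.  The config name and register/value
 * widths below are hypothetical; only the cache_type field is what routes
 * register accesses through the LZO cache ops above.
 *
 *	static const struct regmap_config example_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0xff,
 *		.cache_type = REGCACHE_COMPRESSED,
 *	};
 */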