// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
	&regcache_lzo_ops,
#endif
	&regcache_flat_ops,
};

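/*
 * Illustrative only: a driver picks one of the backends above through
 * regmap_config.  A minimal sketch, with a hypothetical device "foo"
 * and a made-up register layout:
 *
 *	static const struct reg_default foo_reg_defaults[] = {
 *		{ .reg = 0x00, .def = 0x1234 },
 *		{ .reg = 0x02, .def = 0x0000 },
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x10,
 *		.cache_type = REGCACHE_RBTREE,
 *		.reg_defaults = foo_reg_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
 *	};
 *
 * regcache_init() below matches .cache_type against this table and
 * wires up the corresponding ops.
 */
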
static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;

		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access until data is read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = 1;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

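/*
 * Illustrative only: the HW read-back above can be avoided entirely if
 * the driver ships its power-on defaults as a raw block.  A sketch,
 * assuming a hypothetical device with three 16-bit registers:
 *
 *	static const u16 foo_reg_defaults_raw[] = {
 *		0x1234, 0x0000, 0x00ff,
 *	};
 *
 * with, in the regmap_config,
 *
 *	.reg_defaults_raw = foo_reg_defaults_raw,
 *	.num_reg_defaults_raw = ARRAY_SIZE(foo_reg_defaults_raw),
 *
 * so that regcache_hw_init() crafts reg_defaults from that buffer
 * instead of reading the device.
 */
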
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

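/*
 * For context: regcache_read() and regcache_write() are internal to the
 * regmap core rather than driver-facing.  Roughly (simplified from the
 * core's _regmap_read()), reads consult the cache before touching the
 * bus:
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_read(map, reg, val);
 *		if (ret == 0)
 *			return 0;
 *	}
 *	... fall back to a hardware read ...
 *
 * and every non-bypassed write is mirrored into the cache via
 * regcache_write().
 */
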
static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
				    unsigned int val)
{
	int ret;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

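/*
 * A typical resume path, sketched; foo_enable_power() and the foo
 * driver state are hypothetical:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		foo_enable_power(foo);
 *		regcache_cache_only(foo->regmap, false);
 *		return regcache_sync(foo->regmap);
 *	}
 */
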
/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

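/*
 * Sketch: a driver that loses state in only one bank of its register
 * map could restore just that window, e.g.
 *
 *	ret = regcache_sync_region(foo->regmap, FOO_BANK1_BASE,
 *				   FOO_BANK1_END);
 *
 * where the FOO_BANK1_* bounds are hypothetical; min and max are
 * inclusive register addresses, not indices.
 */
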
/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

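/*
 * Sketch: dropping is useful when a block of registers is known to be
 * stale, for example after firmware running on the device rewrites a
 * region behind the kernel's back (FOO_DSP_* bounds hypothetical):
 *
 *	regcache_drop_region(foo->regmap, FOO_DSP_BASE, FOO_DSP_END);
 */
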
/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only, writes through the
 * register map API only update the register cache; they do not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

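/*
 * A typical suspend path, sketched; foo_disable_power() and the driver
 * state are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(foo->regmap, true);
 *		foo_disable_power(foo);
 *		return 0;
 *	}
 *
 * Writes issued while suspended land in the cache only and are written
 * back by regcache_sync() on resume.
 */
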
/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

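/*
 * Sketch: call this wherever the driver knows the device lost its
 * register state (foo_hard_reset() is hypothetical):
 *
 *	foo_hard_reset(foo);
 *	regcache_mark_dirty(foo->regmap);
 *	regcache_sync(foo->regmap);
 *
 * Marking the map dirty also lets regcache_sync() skip registers that
 * still hold their hardware default values.
 */
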
/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

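/*
 * Sketch: bypass suits writes that must reach the hardware but should
 * never be replayed by a later sync, e.g. a self-clearing calibration
 * trigger (the FOO_CAL_* register and bit are hypothetical):
 *
 *	regcache_cache_bypass(foo->regmap, true);
 *	regmap_write(foo->regmap, FOO_CAL_CTRL, FOO_CAL_START);
 *	regcache_cache_bypass(foo->regmap, false);
 */
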
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
			    unsigned long *cache_present,
			    unsigned int block_base, unsigned int start,
			    unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}

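/*
 * Note: this helper is the workhorse for cache implementations whose
 * ->sync() walks contiguous blocks of values; regcache-rbtree, for
 * example, calls it once per rbnode overlapping the requested range.
 */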