xref: /OK3568_Linux_fs/kernel/drivers/base/regmap/regmap-debugfs.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun //
3*4882a593Smuzhiyun // Register map access API - debugfs
4*4882a593Smuzhiyun //
5*4882a593Smuzhiyun // Copyright 2011 Wolfson Microelectronics plc
6*4882a593Smuzhiyun //
7*4882a593Smuzhiyun // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/mutex.h>
11*4882a593Smuzhiyun #include <linux/debugfs.h>
12*4882a593Smuzhiyun #include <linux/uaccess.h>
13*4882a593Smuzhiyun #include <linux/device.h>
14*4882a593Smuzhiyun #include <linux/list.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include "internal.h"
17*4882a593Smuzhiyun 
/*
 * Deferred-init bookkeeping: a regmap registered before the debugfs
 * root directory exists is parked on regmap_debugfs_early_list and
 * picked up later by regmap_debugfs_initcall().
 */
struct regmap_debugfs_node {
	struct regmap *map;	/* map still awaiting debugfs initialisation */
	struct list_head link;	/* entry in regmap_debugfs_early_list */
};

/* Suffix for the next "dummy%d" directory name (maps named "dummy") */
static unsigned int dummy_index;
/* The top-level "regmap" debugfs directory; NULL until the initcall runs */
static struct dentry *regmap_debugfs_root;
/* Maps registered before the root existed, protected by the mutex below */
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
27*4882a593Smuzhiyun 
/*
 * Number of hex digits needed to print the largest register address,
 * obtained by asking snprintf() for the formatted length only.
 */
static size_t regmap_calc_reg_len(int max_val)
{
	int len = snprintf(NULL, 0, "%x", max_val);

	return len;
}
33*4882a593Smuzhiyun 
regmap_name_read_file(struct file * file,char __user * user_buf,size_t count,loff_t * ppos)34*4882a593Smuzhiyun static ssize_t regmap_name_read_file(struct file *file,
35*4882a593Smuzhiyun 				     char __user *user_buf, size_t count,
36*4882a593Smuzhiyun 				     loff_t *ppos)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun 	struct regmap *map = file->private_data;
39*4882a593Smuzhiyun 	const char *name = "nodev";
40*4882a593Smuzhiyun 	int ret;
41*4882a593Smuzhiyun 	char *buf;
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
44*4882a593Smuzhiyun 	if (!buf)
45*4882a593Smuzhiyun 		return -ENOMEM;
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	if (map->dev && map->dev->driver)
48*4882a593Smuzhiyun 		name = map->dev->driver->name;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
51*4882a593Smuzhiyun 	if (ret < 0) {
52*4882a593Smuzhiyun 		kfree(buf);
53*4882a593Smuzhiyun 		return ret;
54*4882a593Smuzhiyun 	}
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
57*4882a593Smuzhiyun 	kfree(buf);
58*4882a593Smuzhiyun 	return ret;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun 
/* File operations for the read-only debugfs "name" file */
static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};
66*4882a593Smuzhiyun 
regmap_debugfs_free_dump_cache(struct regmap * map)67*4882a593Smuzhiyun static void regmap_debugfs_free_dump_cache(struct regmap *map)
68*4882a593Smuzhiyun {
69*4882a593Smuzhiyun 	struct regmap_debugfs_off_cache *c;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	while (!list_empty(&map->debugfs_off_cache)) {
72*4882a593Smuzhiyun 		c = list_first_entry(&map->debugfs_off_cache,
73*4882a593Smuzhiyun 				     struct regmap_debugfs_off_cache,
74*4882a593Smuzhiyun 				     list);
75*4882a593Smuzhiyun 		list_del(&c->list);
76*4882a593Smuzhiyun 		kfree(c);
77*4882a593Smuzhiyun 	}
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun 
regmap_printable(struct regmap * map,unsigned int reg)80*4882a593Smuzhiyun static bool regmap_printable(struct regmap *map, unsigned int reg)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun 	if (regmap_precious(map, reg))
83*4882a593Smuzhiyun 		return false;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
86*4882a593Smuzhiyun 		return false;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	return true;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 *
 * On the first call this builds map->debugfs_off_cache: a list of
 * contiguous blocks of printable registers, each recording the file
 * offsets (min/max) and register numbers (base_reg/max_reg) it covers.
 * Subsequent lookups are then a list walk instead of a register scan.
 * Requires map->debugfs_tot_len to have been computed already.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					/* A partial cache would be wrong */
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			/* Each printable register takes one fixed-width line */
			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			/* Round the offset down to a whole output line */
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		/* Offset is past this block: remember its end and carry on */
		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
180*4882a593Smuzhiyun 
/*
 * Lazily compute the fixed widths of a dump line: hex register field,
 * hex value field, and the total line length including ": " and "\n".
 * The buf/count parameters are unused but kept for the call sites.
 */
static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	if (map->debugfs_tot_len)
		return;

	map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
	map->debugfs_val_len = 2 * map->format.val_bytes;
	/* register + ": " + value + "\n" */
	map->debugfs_tot_len = map->debugfs_reg_len +
		map->debugfs_val_len + 3;
}
192*4882a593Smuzhiyun 
regmap_next_readable_reg(struct regmap * map,int reg)193*4882a593Smuzhiyun static int regmap_next_readable_reg(struct regmap *map, int reg)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	struct regmap_debugfs_off_cache *c;
196*4882a593Smuzhiyun 	int ret = -EINVAL;
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	if (regmap_printable(map, reg + map->reg_stride)) {
199*4882a593Smuzhiyun 		ret = reg + map->reg_stride;
200*4882a593Smuzhiyun 	} else {
201*4882a593Smuzhiyun 		mutex_lock(&map->cache_lock);
202*4882a593Smuzhiyun 		list_for_each_entry(c, &map->debugfs_off_cache, list) {
203*4882a593Smuzhiyun 			if (reg > c->max_reg)
204*4882a593Smuzhiyun 				continue;
205*4882a593Smuzhiyun 			if (reg < c->base_reg) {
206*4882a593Smuzhiyun 				ret = c->base_reg;
207*4882a593Smuzhiyun 				break;
208*4882a593Smuzhiyun 			}
209*4882a593Smuzhiyun 		}
210*4882a593Smuzhiyun 		mutex_unlock(&map->cache_lock);
211*4882a593Smuzhiyun 	}
212*4882a593Smuzhiyun 	return ret;
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun 
/*
 * Dump registers [from, to] of @map in the fixed "reg: value\n" format
 * into @user_buf, honouring the file position in @ppos.  Registers that
 * cannot be read are rendered as 'X' characters instead of failing the
 * whole read.  Returns the number of bytes copied or a negative errno.
 */
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Clamp to the largest allocation kmalloc can satisfy */
	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Make sure the fixed line widths have been computed */
	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register (offset relative to @from) */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		/* Track the virtual file position even for skipped lines */
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}
283*4882a593Smuzhiyun 
regmap_map_read_file(struct file * file,char __user * user_buf,size_t count,loff_t * ppos)284*4882a593Smuzhiyun static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
285*4882a593Smuzhiyun 				    size_t count, loff_t *ppos)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun 	struct regmap *map = file->private_data;
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
290*4882a593Smuzhiyun 				   count, ppos);
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun 
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	/* Copy at most sizeof(buf)-1 bytes and NUL-terminate */
	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	/* Parse "<reg> <value>", both in hex */
	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
336*4882a593Smuzhiyun 
/*
 * File operations for the debugfs "registers" file.  .write is NULL
 * unless REGMAP_ALLOW_WRITE_DEBUGFS is defined above.
 */
static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};
343*4882a593Smuzhiyun 
regmap_range_read_file(struct file * file,char __user * user_buf,size_t count,loff_t * ppos)344*4882a593Smuzhiyun static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
345*4882a593Smuzhiyun 				      size_t count, loff_t *ppos)
346*4882a593Smuzhiyun {
347*4882a593Smuzhiyun 	struct regmap_range_node *range = file->private_data;
348*4882a593Smuzhiyun 	struct regmap *map = range->map;
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	return regmap_read_debugfs(map, range->range_min, range->range_max,
351*4882a593Smuzhiyun 				   user_buf, count, ppos);
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun 
/* File operations for the per-range read-only debugfs files */
static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};
359*4882a593Smuzhiyun 
/*
 * debugfs "range" file: list the contiguous blocks of printable
 * registers as "base-max\n" lines, one per offset-cache entry.
 */
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Clamp to the largest allocation kmalloc can satisfy */
	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Scratch buffer for formatting one "base-max\n" line at a time */
	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		/* Only copy lines at or past the requested offset */
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}
427*4882a593Smuzhiyun 
/* File operations for the read-only debugfs "range" file */
static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
433*4882a593Smuzhiyun 
regmap_access_show(struct seq_file * s,void * ignored)434*4882a593Smuzhiyun static int regmap_access_show(struct seq_file *s, void *ignored)
435*4882a593Smuzhiyun {
436*4882a593Smuzhiyun 	struct regmap *map = s->private;
437*4882a593Smuzhiyun 	int i, reg_len;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	reg_len = regmap_calc_reg_len(map->max_register);
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun 	for (i = 0; i <= map->max_register; i += map->reg_stride) {
442*4882a593Smuzhiyun 		/* Ignore registers which are neither readable nor writable */
443*4882a593Smuzhiyun 		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
444*4882a593Smuzhiyun 			continue;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 		/* Format the register */
447*4882a593Smuzhiyun 		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
448*4882a593Smuzhiyun 			   regmap_readable(map, i) ? 'y' : 'n',
449*4882a593Smuzhiyun 			   regmap_writeable(map, i) ? 'y' : 'n',
450*4882a593Smuzhiyun 			   regmap_volatile(map, i) ? 'y' : 'n',
451*4882a593Smuzhiyun 			   regmap_precious(map, i) ? 'y' : 'n');
452*4882a593Smuzhiyun 	}
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun 	return 0;
455*4882a593Smuzhiyun }
456*4882a593Smuzhiyun 
457*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(regmap_access);
458*4882a593Smuzhiyun 
/*
 * debugfs "cache_only" writer: force the map into or out of cache-only
 * mode.  Leaving cache-only mode triggers a regcache_sync() to push the
 * cached values out to the hardware.
 */
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	/* Pin the debugfs file so it cannot be removed under us */
	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	/* Sync after dropping the map lock; regcache_sync() locks itself */
	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}
499*4882a593Smuzhiyun 
/* File operations for the read/write debugfs "cache_only" file */
static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};
505*4882a593Smuzhiyun 
regmap_cache_bypass_write_file(struct file * file,const char __user * user_buf,size_t count,loff_t * ppos)506*4882a593Smuzhiyun static ssize_t regmap_cache_bypass_write_file(struct file *file,
507*4882a593Smuzhiyun 					      const char __user *user_buf,
508*4882a593Smuzhiyun 					      size_t count, loff_t *ppos)
509*4882a593Smuzhiyun {
510*4882a593Smuzhiyun 	struct regmap *map = container_of(file->private_data,
511*4882a593Smuzhiyun 					  struct regmap, cache_bypass);
512*4882a593Smuzhiyun 	bool new_val;
513*4882a593Smuzhiyun 	int err;
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 	err = kstrtobool_from_user(user_buf, count, &new_val);
516*4882a593Smuzhiyun 	/* Ignore malforned data like debugfs_write_file_bool() */
517*4882a593Smuzhiyun 	if (err)
518*4882a593Smuzhiyun 		return count;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	err = debugfs_file_get(file->f_path.dentry);
521*4882a593Smuzhiyun 	if (err)
522*4882a593Smuzhiyun 		return err;
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	map->lock(map->lock_arg);
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	if (new_val && !map->cache_bypass) {
527*4882a593Smuzhiyun 		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
528*4882a593Smuzhiyun 		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
529*4882a593Smuzhiyun 	} else if (!new_val && map->cache_bypass) {
530*4882a593Smuzhiyun 		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
531*4882a593Smuzhiyun 	}
532*4882a593Smuzhiyun 	map->cache_bypass = new_val;
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	map->unlock(map->lock_arg);
535*4882a593Smuzhiyun 	debugfs_file_put(file->f_path.dentry);
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	return count;
538*4882a593Smuzhiyun }
539*4882a593Smuzhiyun 
/* File operations for the read/write debugfs "cache_bypass" file */
static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};
545*4882a593Smuzhiyun 
/*
 * Create the debugfs directory and files for @map.  If the debugfs
 * root has not been created yet, the map is queued on
 * regmap_debugfs_early_list and initialised later from
 * regmap_debugfs_initcall().
 */
void regmap_debugfs_init(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";
	const char *name = map->name;

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case. For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	/* Directory name: "<dev>-<name>" if the map is named, else "<dev>" */
	if (name) {
		if (!map->debugfs_name) {
			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
			if (!map->debugfs_name)
				return;
		}
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	/* Disambiguate device-less maps as "dummy0", "dummy1", ... */
	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
						dummy_index);
		if (!map->debugfs_name)
				return;
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

		/* Writable only with the compile-time opt-in above */
#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	/* Cache control files only make sense when a cache is in use */
	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	/* One read-only dump file per named register range */
	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	/* Let the cache backend add its own files (e.g. rbtree stats) */
	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}
654*4882a593Smuzhiyun 
regmap_debugfs_exit(struct regmap * map)655*4882a593Smuzhiyun void regmap_debugfs_exit(struct regmap *map)
656*4882a593Smuzhiyun {
657*4882a593Smuzhiyun 	if (map->debugfs) {
658*4882a593Smuzhiyun 		debugfs_remove_recursive(map->debugfs);
659*4882a593Smuzhiyun 		mutex_lock(&map->cache_lock);
660*4882a593Smuzhiyun 		regmap_debugfs_free_dump_cache(map);
661*4882a593Smuzhiyun 		mutex_unlock(&map->cache_lock);
662*4882a593Smuzhiyun 		kfree(map->debugfs_name);
663*4882a593Smuzhiyun 		map->debugfs_name = NULL;
664*4882a593Smuzhiyun 	} else {
665*4882a593Smuzhiyun 		struct regmap_debugfs_node *node, *tmp;
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 		mutex_lock(&regmap_debugfs_early_lock);
668*4882a593Smuzhiyun 		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
669*4882a593Smuzhiyun 					 link) {
670*4882a593Smuzhiyun 			if (node->map == map) {
671*4882a593Smuzhiyun 				list_del(&node->link);
672*4882a593Smuzhiyun 				kfree(node);
673*4882a593Smuzhiyun 			}
674*4882a593Smuzhiyun 		}
675*4882a593Smuzhiyun 		mutex_unlock(&regmap_debugfs_early_lock);
676*4882a593Smuzhiyun 	}
677*4882a593Smuzhiyun }
678*4882a593Smuzhiyun 
regmap_debugfs_initcall(void)679*4882a593Smuzhiyun void regmap_debugfs_initcall(void)
680*4882a593Smuzhiyun {
681*4882a593Smuzhiyun 	struct regmap_debugfs_node *node, *tmp;
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun 	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun 	mutex_lock(&regmap_debugfs_early_lock);
686*4882a593Smuzhiyun 	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
687*4882a593Smuzhiyun 		regmap_debugfs_init(node->map);
688*4882a593Smuzhiyun 		list_del(&node->link);
689*4882a593Smuzhiyun 		kfree(node);
690*4882a593Smuzhiyun 	}
691*4882a593Smuzhiyun 	mutex_unlock(&regmap_debugfs_early_lock);
692*4882a593Smuzhiyun }
693