// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
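/*
 * Usage sketch (hedged, not taken from this file): a producing driver that
 * wants to expose fixed consumer mappings can register a NULL-terminated
 * iio_map array at probe time.  The names below ("example-consumer",
 * "vbat", "channel_0") are hypothetical and only illustrate which
 * struct iio_map fields iio_map_array_register() consumes; the empty last
 * entry is the sentinel whose NULL consumer_dev_name terminates the loop
 * above.
 *
 *	static struct iio_map example_maps[] = {
 *		{
 *			.consumer_dev_name = "example-consumer",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "channel_0",
 *		},
 *		{ },
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, example_maps);
 *	if (ret)
 *		return ret;
 */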


/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	mutex_lock(&iio_map_list_lock);
	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}
/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev: pointer to the iio_dev structure
 * @iiospec: IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * channels in IIO chips. This function performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
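/*
 * Illustrative device-tree fragment (an assumption for documentation, not
 * required by this file): with the default translation above, the cell
 * following the phandle is taken directly as the channel index, so
 * "<&adc 2>" selects indio_dev->channels[2] of the node labelled "adc".
 * The compatible string "vendor,example-adc" is made up.
 *
 *	adc: adc@48 {
 *		compatible = "vendor,example-adc";
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 2>;
 *		io-channel-names = "sense";
 *	};
 */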

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	if (idev == NULL) {
		of_node_put(iiospec.np);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	of_node_put(iiospec.np);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find a matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
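/*
 * Consumer-side sketch (hypothetical driver code, not part of this file's
 * API documentation): a probe routine typically looks up its named channel
 * and releases it again on teardown.  "vbat" is only an illustrative
 * io-channel-names / consumer_channel name.
 *
 *	struct iio_channel *chan;
 *
 *	chan = iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *	...
 *	iio_channel_release(chan);
 */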

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
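/*
 * Managed variant, sketched usage (hypothetical consumer code): the channel
 * obtained here is released automatically when "dev" is unbound, so no
 * explicit iio_channel_release() call is needed in the consumer's remove
 * path.  The channel name "vbat" is again only illustrative.
 *
 *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */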

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
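/*
 * Sketch of how a consumer might walk the array returned above (hypothetical
 * code): the array is "NULL terminated" in the sense that the entry after
 * the last valid channel has a NULL indio_dev, which is also what
 * iio_channel_release_all() below relies on.
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all(&pdev->dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++) {
 *		enum iio_chan_type type;
 *
 *		if (!iio_get_channel_type(chan, &type))
 *			dev_info(&pdev->dev, "got channel of type %d\n", type);
 *	}
 *	iio_channel_release_all(chans);
 */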

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
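/*
 * Reading sketch (hypothetical consumer code): a raw value on its own is
 * device specific, so a consumer usually either asks for a processed value
 * directly or converts the raw reading with the channel's scale and offset,
 * as the helpers further down implement.  Interpreting the result in mV
 * assumes a voltage channel and is a consumer convention, not something
 * this file guarantees.
 *
 *	int raw, mv, ret;
 *
 *	ret = iio_read_channel_raw(chan, &raw);
 *	if (ret < 0)
 *		return ret;
 *	ret = iio_convert_raw_to_processed(chan, raw, &mv, 1);
 */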

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available, apply the consumer
		 * scale to the raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
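/*
 * Worked example of the conversion above (numbers are illustrative only):
 * for a channel reporting a scale of IIO_VAL_FRACTIONAL with
 * scale_val = 2500 and scale_val2 = 4096 (i.e. 2500/4096 mV per LSB of a
 * 12-bit ADC with a 2.5 V reference), no offset, raw = 1024 and a consumer
 * scale of 1:
 *
 *	processed = 1024 * 2500 * 1 / 4096 = 625
 *
 * so *processed holds 625, i.e. 625 mV under that assumed scaling
 * convention.
 */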

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
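/*
 * Interpretation sketch (hypothetical consumer code): the value returned by
 * the *_avail helpers is either IIO_AVAIL_LIST (vals[] holds "length"
 * discrete values) or IIO_AVAIL_RANGE (for IIO_VAL_INT, vals[] holds
 * min, step, max), which is exactly what iio_channel_read_max() below
 * picks apart.
 *
 *	const int *vals;
 *	int length, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &length);
 *	if (ret == IIO_AVAIL_LIST && length > 0)
 *		pr_debug("first selectable raw value: %d\n", vals[0]);
 *	else if (ret == IIO_AVAIL_RANGE)
 *		pr_debug("raw range: %d..%d step %d\n",
 *			 vals[0], vals[2], vals[1]);
 */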

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
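/*
 * Extended-attribute sketch (hypothetical consumer code): ext_info entries
 * are string based, so a consumer passes a sufficiently large buffer (a full
 * page mirrors what sysfs hands to the same callbacks) together with the
 * attribute name published by the producing driver.  The attribute name
 * "calibration" below is made up purely for illustration.
 *
 *	char *buf = (char *)get_zeroed_page(GFP_KERNEL);
 *	ssize_t len;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	len = iio_read_channel_ext_info(chan, "calibration", buf);
 *	...
 *	free_page((unsigned long)buf);
 */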