// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger) simple by
 * starting trigger capture when the first sensor is added.
 *
 * Complex simultaneous start requires use of 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * iio_trigger_read_name() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int __iio_trigger_register(struct iio_trigger *trig_info,
			   struct module *this_mod)
{
	int ret;

	trig_info->owner = this_mod;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%ld",
		     (unsigned long) trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(__iio_trigger_register);

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	if (!indio_dev || !trig)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);
	WARN_ON(indio_dev->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	indio_dev->trig_readonly = true;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);

/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			iio_trigger_get(trig);
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
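
/*
 * Illustrative sketch (not part of the core): a hardware trigger driver with
 * a data-ready interrupt line can wire that interrupt directly to
 * iio_trigger_generic_data_rdy_poll(), passing the trigger as the dev_id.
 * The "foo" name and the dev/irq/trig variables below are hypothetical.
 *
 *	ret = devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
 *			       IRQF_TRIGGER_RISING, "foo_drdy", trig);
 *	if (ret)
 *		return ret;
 *
 * This handler calls iio_trigger_poll() from hard interrupt context; triggers
 * whose poll must run from a threaded handler should use
 * iio_trigger_poll_chained() instead.
 */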

void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/* Complexity in here.  With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled.  An alternative of not enabling the trigger
 * unless the relevant pollfunc is present may be the best option.
 */
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	int ret = 0;
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(pf->indio_dev->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(pf->indio_dev->driver_module);
	return ret;
}

int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	int ret = 0;
	bool no_other_users =
		(bitmap_weight(trig->pool,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1);

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->driver_module);

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
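
/*
 * Illustrative sketch: consumer drivers typically pass iio_pollfunc_store_time()
 * as the top half when setting up a triggered buffer, so a timestamp is latched
 * before the (possibly slow) threaded handler runs.  "foo_trigger_handler" is a
 * hypothetical bottom half.
 *
 *	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
 *					 foo_trigger_handler, NULL);
 *	if (ret)
 *		return ret;
 */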

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (pf == NULL)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t iio_trigger_read_current(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sprintf(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t iio_trigger_write_current(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	if (indio_dev->trig_readonly) {
		mutex_unlock(&indio_dev->mlock);
		return -EPERM;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i, NULL);
			irq_set_handler(trig->subirq_base + i, NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger,
						subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger,
						subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static __printf(1, 0)
struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
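
/*
 * Illustrative sketch: the unmanaged allocate/register pairing.  A trigger
 * provider that cannot use the devm_ variants below must unwind in reverse
 * order on error and on removal.  The "foo" names, dev, st and the use of
 * indio_dev->id are hypothetical.
 *
 *	trig = iio_trigger_alloc("foo-dev%d", indio_dev->id);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->dev.parent = dev;
 *	trig->ops = &foo_trigger_ops;
 *	iio_trigger_set_drvdata(trig, st);
 *	ret = iio_trigger_register(trig);
 *	if (ret) {
 *		iio_trigger_free(trig);
 *		return ret;
 *	}
 *
 *	on removal:
 *	iio_trigger_unregister(trig);
 *	iio_trigger_free(trig);
 */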

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:	Device to allocate iio_trigger for
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
					   const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
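
/*
 * Illustrative sketch: typical probe-time use of the managed allocator,
 * paired with the devm_iio_trigger_register() wrapper around
 * __devm_iio_trigger_register() below.  The "foo" names are hypothetical;
 * the trigger is unregistered and freed automatically on driver detach.
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				      indio_dev->id);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->dev.parent = dev;
 *	trig->ops = &foo_trigger_ops;
 *	iio_trigger_set_drvdata(trig, indio_dev);
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 */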

static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
	iio_trigger_unregister(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 * @this_mod:	module registering the trigger
 *
 * Managed iio_trigger_register(). The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int __devm_iio_trigger_register(struct device *dev,
				struct iio_trigger *trig_info,
				struct module *this_mod)
{
	struct iio_trigger **ptr;
	int ret;

	ptr = devres_alloc(devm_iio_trigger_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = trig_info;
	ret = __iio_trigger_register(trig_info, this_mod);
	if (!ret)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);

bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong
 *				     to the same device
 * @trig:	The IIO trigger to check
 * @indio_dev:	the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
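
/*
 * Illustrative sketch: a device that provides its own data-ready trigger and
 * must not be attached to any other IIO device can plug this helper straight
 * into its trigger ops.  The ops and handler names are hypothetical.
 *
 *	static const struct iio_trigger_ops foo_trigger_ops = {
 *		.set_trigger_state = foo_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 */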

void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}