// SPDX-License-Identifier: GPL-2.0-only
/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 * @read_lock:		lock to protect kfifo read operations
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
	struct mutex		read_lock;
};

bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}

/**
 * iio_push_event() - try to add event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		What event
 * @timestamp:		When the event occurred
 *
 * Note: The caller must make sure that this function is not running
 * concurrently for the same indio_dev more than once.
 *
 * This function may be safely used as soon as a valid reference to iio_dev has
 * been obtained via iio_device_alloc(), but any events that are submitted
 * before iio_device_register() has successfully completed will be silently
 * discarded.
 **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	struct iio_event_data ev;
	int copied;

	if (!ev_int)
		return 0;

	/* Does anyone care? */
	if (iio_event_enabled(ev_int)) {
		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_poll(&ev_int->wait, EPOLLIN);
	}

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
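
/*
 * Usage sketch (added, not part of the original file): a driver's
 * threshold interrupt handler might push an event roughly like this.
 * The handler name, channel type and channel number are hypothetical;
 * IIO_UNMOD_EVENT_CODE() and iio_get_time_ns() are the real helpers
 * from <linux/iio/events.h> and <linux/iio/iio.h>.
 *
 *	static irqreturn_t example_thresh_irq(int irq, void *private)
 *	{
 *		struct iio_dev *indio_dev = private;
 *
 *		iio_push_event(indio_dev,
 *			       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *						    IIO_EV_TYPE_THRESH,
 *						    IIO_EV_DIR_RISING),
 *			       iio_get_time_ns(indio_dev));
 *		return IRQ_HANDLED;
 *	}
 */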

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	File structure pointer to identify the device
 * @wait:	Poll table pointer to add the wait queue on
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or a negative error code on failure
 */
static __poll_t iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	__poll_t events = 0;

	if (!indio_dev->info)
		return events;

	poll_wait(filep, &ev_int->wait, wait);

	if (!kfifo_is_empty(&ev_int->det_events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster) we return -EAGAIN if the
		 * file descriptor is non-blocking, otherwise we go back to
		 * sleep and wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);

	iio_device_put(indio_dev);

	return 0;
}

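/*
 * Note (added): these fops deliberately omit .open; the file is created
 * by anon_inode_getfd() in iio_event_getfd() below, which stores the
 * iio_dev pointer directly in file->private_data.
 */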
static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.poll =  iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	fd = mutex_lock_interruptible(&indio_dev->mlock);
	if (fd)
		return fd;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		fd = -EBUSY;
		goto unlock;
	}

	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
				indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		iio_device_put(indio_dev);
	} else {
		kfifo_reset_out(&ev_int->det_events);
	}

unlock:
	mutex_unlock(&indio_dev->mlock);
	return fd;
}
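
/*
 * Userspace sketch (added, not part of the original file): the fd
 * returned above reaches userspace through the IIO_GET_EVENT_FD_IOCTL
 * ioctl on the /dev/iio:deviceX chardev and is then read for
 * struct iio_event_data records. The device path and error handling
 * are illustrative only.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	int event_fd;
 *	struct iio_event_data ev;
 *
 *	if (ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) == 0 &&
 *	    read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("event id %llu at %lld\n",
 *		       (unsigned long long)ev.id, (long long)ev.timestamp);
 */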

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
#ifdef CONFIG_NO_GKI
	[IIO_EV_TYPE_FIFO_FLUSH] = "fifo_flush",
#endif
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling",
#ifdef CONFIG_NO_GKI
	[IIO_EV_DIR_FIFO_EMPTY] = "empty",
	[IIO_EV_DIR_FIFO_DATA] = "data",
#endif
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
};
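
/*
 * Example (added): iio_device_add_event() composes attribute postfixes
 * from the tables above, so a separate rising threshold enable on a
 * voltage channel shows up in sysfs as e.g.
 * events/in_voltage0_thresh_rising_en.
 */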

static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}
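
/*
 * Note (added): the helpers above decode the iio_dev_attr address word,
 * which iio_device_add_event() packs as (info << 16) | spec_index.
 */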

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	val = indio_dev->info->read_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2, val_arr[2];
	int ret;

	ret = indio_dev->info->read_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		&val, &val2);
	if (ret < 0)
		return ret;
	val_arr[0] = val;
	val_arr[1] = val2;
	return iio_format_value(buf, ret, 2, val_arr);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

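	/*
	 * Note (added): with a fract_mult of 100000 the fractional part
	 * lands in val2 in micro units, e.g. "1.5" parses to val = 1,
	 * val2 = 500000.
	 */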
	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;
	ret = indio_dev->info->write_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		val, val2);
	if (ret < 0)
		return ret;

	return len;
}

static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			&iio_dev_opaque->event_interface->dev_attr_list);
		kfree(postfix);

		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}

	return attrcount;
}
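
/*
 * Example (added): the masks walked above come from a driver's channel
 * declaration; a hypothetical channel with a separate rising threshold
 * might declare:
 *
 *	static const struct iio_event_spec example_event = {
 *		.type = IIO_EV_TYPE_THRESH,
 *		.dir = IIO_EV_DIR_RISING,
 *		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
 *				 BIT(IIO_EV_INFO_ENABLE),
 *	};
 */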

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}

static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int;
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (ev_int == NULL)
		return -ENOMEM;

	iio_dev_opaque->event_interface = ev_int;

	INIT_LIST_HEAD(&ev_int->dev_attr_list);

	iio_setup_ev_int(ev_int);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	ev_int->group.name = iio_event_group_name;
	ev_int->group.attrs = kcalloc(attrcount + 1,
				      sizeof(ev_int->group.attrs[0]),
				      GFP_KERNEL);
	if (ev_int->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(ev_int->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(ev_int->group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &ev_int->dev_attr_list, l)
		ev_int->group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &ev_int->group;

	return 0;

error_free_setup_event_lines:
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (iio_dev_opaque->event_interface == NULL)
		return;
	wake_up(&iio_dev_opaque->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	if (ev_int == NULL)
		return;
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int->group.attrs);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
}
573