1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun #include <linux/slab.h>
3*4882a593Smuzhiyun #include <linux/kernel.h>
4*4882a593Smuzhiyun #include <linux/module.h>
5*4882a593Smuzhiyun #include <linux/device.h>
6*4882a593Smuzhiyun #include <linux/workqueue.h>
7*4882a593Smuzhiyun #include <linux/kfifo.h>
8*4882a593Smuzhiyun #include <linux/mutex.h>
9*4882a593Smuzhiyun #include <linux/iio/iio.h>
10*4882a593Smuzhiyun #include <linux/iio/buffer.h>
11*4882a593Smuzhiyun #include <linux/iio/kfifo_buf.h>
12*4882a593Smuzhiyun #include <linux/iio/buffer_impl.h>
13*4882a593Smuzhiyun #include <linux/sched.h>
14*4882a593Smuzhiyun #include <linux/poll.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun struct iio_kfifo {
17*4882a593Smuzhiyun struct iio_buffer buffer;
18*4882a593Smuzhiyun struct kfifo kf;
19*4882a593Smuzhiyun struct mutex user_lock;
20*4882a593Smuzhiyun int update_needed;
21*4882a593Smuzhiyun };
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
24*4882a593Smuzhiyun
/*
 * Allocate the backing kfifo: @length records of @bytes_per_datum bytes.
 * Returns 0 on success or a negative errno.
 */
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
			size_t bytes_per_datum, unsigned int length)
{
	if (!length || !bytes_per_datum)
		return -EINVAL;

	/*
	 * kfifo rounds the record count up to the next power of two;
	 * reject requests whose rounded-up byte size would overflow an
	 * unsigned int.
	 */
	if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
		return -EINVAL;

	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
			     bytes_per_datum, GFP_KERNEL);
}
41*4882a593Smuzhiyun
iio_request_update_kfifo(struct iio_buffer * r)42*4882a593Smuzhiyun static int iio_request_update_kfifo(struct iio_buffer *r)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun int ret = 0;
45*4882a593Smuzhiyun struct iio_kfifo *buf = iio_to_kfifo(r);
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun mutex_lock(&buf->user_lock);
48*4882a593Smuzhiyun if (buf->update_needed) {
49*4882a593Smuzhiyun kfifo_free(&buf->kf);
50*4882a593Smuzhiyun ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
51*4882a593Smuzhiyun buf->buffer.length);
52*4882a593Smuzhiyun if (ret >= 0)
53*4882a593Smuzhiyun buf->update_needed = false;
54*4882a593Smuzhiyun } else {
55*4882a593Smuzhiyun kfifo_reset_out(&buf->kf);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun mutex_unlock(&buf->user_lock);
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun return ret;
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun
iio_mark_update_needed_kfifo(struct iio_buffer * r)62*4882a593Smuzhiyun static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun struct iio_kfifo *kf = iio_to_kfifo(r);
65*4882a593Smuzhiyun kf->update_needed = true;
66*4882a593Smuzhiyun return 0;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
/* Record a new sample size; a change forces a reallocation later. */
static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum == bpd)
		return 0;

	r->bytes_per_datum = bpd;
	iio_mark_update_needed_kfifo(r);

	return 0;
}
77*4882a593Smuzhiyun
iio_set_length_kfifo(struct iio_buffer * r,unsigned int length)78*4882a593Smuzhiyun static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun /* Avoid an invalid state */
81*4882a593Smuzhiyun if (length < 2)
82*4882a593Smuzhiyun length = 2;
83*4882a593Smuzhiyun if (r->length != length) {
84*4882a593Smuzhiyun r->length = length;
85*4882a593Smuzhiyun iio_mark_update_needed_kfifo(r);
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun return 0;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun
iio_store_to_kfifo(struct iio_buffer * r,const void * data)90*4882a593Smuzhiyun static int iio_store_to_kfifo(struct iio_buffer *r,
91*4882a593Smuzhiyun const void *data)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun int ret;
94*4882a593Smuzhiyun struct iio_kfifo *kf = iio_to_kfifo(r);
95*4882a593Smuzhiyun ret = kfifo_in(&kf->kf, data, 1);
96*4882a593Smuzhiyun if (ret != 1)
97*4882a593Smuzhiyun return -EBUSY;
98*4882a593Smuzhiyun return 0;
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun
/*
 * Copy up to @n bytes of queued samples to the userspace buffer @buf.
 * Returns the number of bytes copied, or a negative errno.  The read
 * must cover at least one whole record (kfifo_esize()).
 */
static int iio_read_kfifo(struct iio_buffer *r, size_t n, char __user *buf)
{
	int ret, copied;
	struct iio_kfifo *kf = iio_to_kfifo(r);

	/* Interruptible so a blocked reader can be signalled. */
	if (mutex_lock_interruptible(&kf->user_lock))
		return -ERESTARTSYS;

	/* Reject reads before allocation or shorter than one record. */
	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
		ret = -EINVAL;
	else
		ret = kfifo_to_user(&kf->kf, buf, n, &copied);
	mutex_unlock(&kf->user_lock);
	if (ret < 0)
		return ret;

	return copied;
}
119*4882a593Smuzhiyun
iio_kfifo_buf_data_available(struct iio_buffer * r)120*4882a593Smuzhiyun static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun struct iio_kfifo *kf = iio_to_kfifo(r);
123*4882a593Smuzhiyun size_t samples;
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun mutex_lock(&kf->user_lock);
126*4882a593Smuzhiyun samples = kfifo_len(&kf->kf);
127*4882a593Smuzhiyun mutex_unlock(&kf->user_lock);
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun return samples;
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
iio_kfifo_buffer_release(struct iio_buffer * buffer)132*4882a593Smuzhiyun static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun struct iio_kfifo *kf = iio_to_kfifo(buffer);
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun mutex_destroy(&kf->user_lock);
137*4882a593Smuzhiyun kfifo_free(&kf->kf);
138*4882a593Smuzhiyun kfree(kf);
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun static const struct iio_buffer_access_funcs kfifo_access_funcs = {
142*4882a593Smuzhiyun .store_to = &iio_store_to_kfifo,
143*4882a593Smuzhiyun .read = &iio_read_kfifo,
144*4882a593Smuzhiyun .data_available = iio_kfifo_buf_data_available,
145*4882a593Smuzhiyun .request_update = &iio_request_update_kfifo,
146*4882a593Smuzhiyun .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
147*4882a593Smuzhiyun .set_length = &iio_set_length_kfifo,
148*4882a593Smuzhiyun .release = &iio_kfifo_buffer_release,
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun .modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
151*4882a593Smuzhiyun };
152*4882a593Smuzhiyun
iio_kfifo_allocate(void)153*4882a593Smuzhiyun struct iio_buffer *iio_kfifo_allocate(void)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun struct iio_kfifo *kf;
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun kf = kzalloc(sizeof(*kf), GFP_KERNEL);
158*4882a593Smuzhiyun if (!kf)
159*4882a593Smuzhiyun return NULL;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun kf->update_needed = true;
162*4882a593Smuzhiyun iio_buffer_init(&kf->buffer);
163*4882a593Smuzhiyun kf->buffer.access = &kfifo_access_funcs;
164*4882a593Smuzhiyun kf->buffer.length = 2;
165*4882a593Smuzhiyun mutex_init(&kf->user_lock);
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun return &kf->buffer;
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun EXPORT_SYMBOL(iio_kfifo_allocate);
170*4882a593Smuzhiyun
/**
 * iio_kfifo_free - drop a reference to a kfifo buffer
 * @r: buffer obtained from iio_kfifo_allocate()
 *
 * Releases one reference; the actual teardown happens in
 * iio_kfifo_buffer_release() once the last reference is gone.
 */
void iio_kfifo_free(struct iio_buffer *r)
{
	iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);
176*4882a593Smuzhiyun
/* devres destructor: frees the buffer stashed by devm_iio_kfifo_allocate(). */
static void devm_iio_kfifo_release(struct device *dev, void *res)
{
	iio_kfifo_free(*(struct iio_buffer **)res);
}
181*4882a593Smuzhiyun
/**
 * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
 * @dev: Device to allocate kfifo buffer for
 *
 * RETURNS:
 * Pointer to allocated iio_buffer on success, NULL on failure.
 */
devm_iio_kfifo_allocate(struct device * dev)189*4882a593Smuzhiyun struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun struct iio_buffer **ptr, *r;
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
194*4882a593Smuzhiyun if (!ptr)
195*4882a593Smuzhiyun return NULL;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun r = iio_kfifo_allocate();
198*4882a593Smuzhiyun if (r) {
199*4882a593Smuzhiyun *ptr = r;
200*4882a593Smuzhiyun devres_add(dev, ptr);
201*4882a593Smuzhiyun } else {
202*4882a593Smuzhiyun devres_free(ptr);
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun return r;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun EXPORT_SYMBOL(devm_iio_kfifo_allocate);
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun MODULE_LICENSE("GPL");
210