// SPDX-License-Identifier: GPL-2.0
/*
 * cdev.c - Character device component for Mostcore
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/most.h>

#define CHRDEV_REGION_SIZE 50

static struct cdev_component {
	dev_t devno;
	struct ida minor_id;
	unsigned int major;
	struct class *class;
	struct most_component cc;
} comp;

struct comp_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;	/* synchronization lock to unlink channels */
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct comp_channel, cdev)
static struct list_head channel_list;
static spinlock_t ch_list_lock;

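/**
 * ch_has_mbo - check whether the core can supply an MBO for this channel
 * @c: pointer to the channel object
 */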
static inline bool ch_has_mbo(struct comp_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}

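/**
 * ch_get_mbo - fetch an MBO for writing
 * @c: pointer to the channel object
 * @mbo: location where the MBO pointer is stored
 *
 * Reuses a buffer already queued in the channel's fifo; otherwise a new MBO
 * is requested from the core and queued for subsequent partial writes.
 */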
static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}

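/**
 * get_channel - look up a channel by interface and channel ID
 * @iface: pointer to interface instance
 * @id: channel index
 *
 * Returns the matching channel object from the global list or NULL.
 */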
static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
	struct comp_channel *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			spin_unlock_irqrestore(&ch_list_lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	return NULL;
}

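/**
 * stop_channel - return all queued MBOs and stop the channel in the core
 * @c: pointer to the channel object
 */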
static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}

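/**
 * destroy_cdev - remove the device node and take the channel off the list
 * @c: pointer to the channel object
 */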
static void destroy_cdev(struct comp_channel *c)
{
	unsigned long flags;

	device_destroy(comp.class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}

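/**
 * destroy_channel - release the minor number and free the channel object
 * @c: pointer to the channel object
 */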
static void destroy_channel(struct comp_channel *c)
{
	ida_simple_remove(&comp.minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}

/**
 * comp_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 */
static int comp_open(struct inode *inode, struct file *filp)
{
	struct comp_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}

/**
 * comp_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 */
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

/**
 * comp_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 */
static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}

/**
 * comp_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 */
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* make sure we don't submit to gone devices */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}

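/**
 * comp_poll - implements the syscall to poll the device
 * @filp: file pointer
 * @wait: poll table
 *
 * Reports EPOLLIN for RX channels with buffered data and EPOLLOUT for TX
 * channels that can accept another buffer.
 */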
static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
	struct comp_channel *c = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &c->wq, wait);

	mutex_lock(&c->io_mutex);
	if (c->cfg->direction == MOST_CH_RX) {
		if (!c->dev || !kfifo_is_empty(&c->fifo))
			mask |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&c->io_mutex);
	return mask;
}

/*
 * Initialization of struct file_operations
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = comp_read,
	.write = comp_write,
	.open = comp_open,
	.release = comp_close,
	.poll = comp_poll,
};

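/*
 * Illustrative user-space usage (sketch only; the actual device node name
 * depends on how the channel is configured and linked to this component):
 *
 *	int fd = open("/dev/my_rx_channel", O_RDONLY);
 *	char buf[1024];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// blocks until an MBO arrives
 *	close(fd);
 *
 * Writes behave analogously on TX channels, and poll() can be used to wait
 * for data or free buffers.
 */
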
/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 */
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

/**
 * comp_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO and stores it in the local
 * fifo buffer.
 */
static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -EINVAL;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * comp_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes sleeping processes in the wait-queue.
 */
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		dev_warn(c->dev, "Channel ID out of range\n");
		return -EINVAL;
	}

	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * comp_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @name: name of the device to be created
 * @args: channel arguments string (not used by this component)
 *
 * This allocates a channel object and creates the device node in /dev.
 *
 * Returns 0 on success or error code otherwise.
 */
static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if (!cfg || !name)
		return -EINVAL;

	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev_and_free_channel;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo_and_del_list;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

err_free_kfifo_and_del_list:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev_and_free_channel:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_simple_remove(&comp.minor_id, current_minor);
	return retval;
}

static struct cdev_component comp = {
	.cc = {
		.mod = THIS_MODULE,
		.name = "cdev",
		.probe_channel = comp_probe,
		.disconnect_channel = comp_disconnect_channel,
		.rx_completion = comp_rx_completion,
		.tx_completion = comp_tx_completion,
	},
};

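/*
 * Module initialization: create the device class, reserve a character
 * device region and register the component, including its configfs
 * subsystem, with the Mostcore.
 */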
static int __init mod_init(void)
{
	int err;

	comp.class = class_create(THIS_MODULE, "most_cdev");
	if (IS_ERR(comp.class))
		return PTR_ERR(comp.class);

	INIT_LIST_HEAD(&channel_list);
	spin_lock_init(&ch_list_lock);
	ida_init(&comp.minor_id);

	err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	comp.major = MAJOR(comp.devno);
	err = most_register_component(&comp.cc);
	if (err)
		goto free_cdev;
	err = most_register_configfs_subsys(&comp.cc);
	if (err)
		goto deregister_comp;
	return 0;

deregister_comp:
	most_deregister_component(&comp.cc);
free_cdev:
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
	return err;
}

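/*
 * Module teardown: deregister the component, destroy all remaining channels
 * and release the character device region and class.
 */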
static void __exit mod_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_configfs_subsys(&comp.cc);
	most_deregister_component(&comp.cc);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");