// SPDX-License-Identifier: GPL-2.0-only
/*
 * fireworks_hwdep.c - a part of driver for Fireworks based devices
 *
 * Copyright (c) 2013-2014 Takashi Sakamoto
 */

/*
 * This code provides five functionalities:
 *
 * 1. get information about the FireWire node
 * 2. get notifications about starting/stopping streams
 * 3. lock/unlock streaming
 * 4. transmit commands of EFW transactions
 * 5. receive responses of EFW transactions
 *
 * See the illustrative user-space sketch below.
 */
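
/*
 * Illustrative user-space sketch (not part of the original driver). It is an
 * assumption that the card's hwdep node is /dev/snd/hwdepC0D0; the ioctl and
 * event definitions come from the UAPI header <sound/firewire.h>.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sound/firewire.h>
 *
 *	int main(void)
 *	{
 *		struct snd_firewire_get_info info;
 *		char buf[1024];
 *		const union snd_firewire_event *ev = (const void *)buf;
 *		ssize_t len;
 *		int fd = open("/dev/snd/hwdepC0D0", O_RDWR);	// assumed node
 *
 *		if (fd < 0)
 *			return 1;
 *		// 1. get information about the FireWire node
 *		if (ioctl(fd, SNDRV_FIREWIRE_IOCTL_GET_INFO, &info) == 0)
 *			printf("card %u: %s\n", info.card, info.device_name);
 *		// 2./5. read() blocks until a lock-status change or a queued
 *		// EFW response is available
 *		len = read(fd, buf, sizeof(buf));
 *		if (len > 0 && ev->common.type == SNDRV_FIREWIRE_EVENT_LOCK_STATUS)
 *			printf("locked: %u\n", ev->lock_status.status);
 *		close(fd);
 *		return 0;
 *	}
 */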

#include "fireworks.h"

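/*
 * Copy the SNDRV_FIREWIRE_EVENT_EFW_RESPONSE type and as many complete queued
 * responses as fit into the user buffer. The spinlock is dropped around each
 * copy_to_user() call.
 */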
static long
hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
		    loff_t *offset)
{
	unsigned int length, till_end, type;
	struct snd_efw_transaction *t;
	u8 *pull_ptr;
	long count = 0;

	if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
		return -ENOSPC;

	/* data type is SNDRV_FIREWIRE_EVENT_EFW_RESPONSE */
	type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
	if (copy_to_user(buf, &type, sizeof(type)))
		return -EFAULT;
	count += sizeof(type);
	remained -= sizeof(type);
	buf += sizeof(type);

	/* write into buffer as many responses as possible */
	spin_lock_irq(&efw->lock);

	/*
	 * If another task reaches here while this task is accessing user
	 * space, it picks up the current position in the buffer and can read
	 * the same series of responses.
	 */
	pull_ptr = efw->pull_ptr;

	while (efw->push_ptr != pull_ptr) {
		t = (struct snd_efw_transaction *)(pull_ptr);
		length = be32_to_cpu(t->length) * sizeof(__be32);

		/* confirm enough space for this response */
		if (remained < length)
			break;

		/* copy from ring buffer to user buffer */
		while (length > 0) {
			till_end = snd_efw_resp_buf_size -
				(unsigned int)(pull_ptr - efw->resp_buf);
			till_end = min_t(unsigned int, length, till_end);

			spin_unlock_irq(&efw->lock);

			if (copy_to_user(buf, pull_ptr, till_end))
				return -EFAULT;

			spin_lock_irq(&efw->lock);

			pull_ptr += till_end;
			if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
				pull_ptr -= snd_efw_resp_buf_size;

			length -= till_end;
			buf += till_end;
			count += till_end;
			remained -= till_end;
		}
	}

	/*
	 * All tasks can read from the buffer nearly simultaneously, but the
	 * final position reached by each task differs depending on the length
	 * of the buffer it was given. Here, for simplicity, the buffer
	 * position is set by whichever task finishes last. A listening
	 * application should therefore let only one thread read from the
	 * buffer; otherwise, each task may see a different sequence of
	 * responses depending on the buffer lengths used.
	 */
	efw->pull_ptr = pull_ptr;

	spin_unlock_irq(&efw->lock);

	return count;
}

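/*
 * Report the current lock status to user space as a
 * SNDRV_FIREWIRE_EVENT_LOCK_STATUS event and clear the 'changed' flag.
 */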
static long
hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
		  loff_t *offset)
{
	union snd_firewire_event event = {
		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
	};

	spin_lock_irq(&efw->lock);

	event.lock_status.status = (efw->dev_lock_count > 0);
	efw->dev_lock_changed = false;

	spin_unlock_irq(&efw->lock);

	count = min_t(long, count, sizeof(event.lock_status));

	if (copy_to_user(buf, &event, count))
		return -EFAULT;

	return count;
}

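/*
 * Sleep interruptibly until a lock-status change or a queued EFW response is
 * available, then hand off to the matching helper above.
 */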
static long
hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
	   loff_t *offset)
{
	struct snd_efw *efw = hwdep->private_data;
	DEFINE_WAIT(wait);
	bool dev_lock_changed;
	bool queued;

	spin_lock_irq(&efw->lock);

	dev_lock_changed = efw->dev_lock_changed;
	queued = efw->push_ptr != efw->pull_ptr;

	while (!dev_lock_changed && !queued) {
		prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_irq(&efw->lock);
		schedule();
		finish_wait(&efw->hwdep_wait, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		spin_lock_irq(&efw->lock);
		dev_lock_changed = efw->dev_lock_changed;
		queued = efw->push_ptr != efw->pull_ptr;
	}

	spin_unlock_irq(&efw->lock);

	if (dev_lock_changed)
		count = hwdep_read_locked(efw, buf, count, offset);
	else if (queued)
		count = hwdep_read_resp_buf(efw, buf, count, offset);

	return count;
}

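/*
 * Copy an EFW command from user space, reject sequence numbers reserved for
 * in-kernel transactions, and transmit it to the unit.
 */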
static long
hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count,
	    loff_t *offset)
{
	struct snd_efw *efw = hwdep->private_data;
	u32 seqnum;
	u8 *buf;

	if (count < sizeof(struct snd_efw_transaction) ||
	    SND_EFW_RESPONSE_MAXIMUM_BYTES < count)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* check seqnum is not for kernel-land */
	seqnum = be32_to_cpu(((struct snd_efw_transaction *)buf)->seqnum);
	if (seqnum > SND_EFW_TRANSACTION_USER_SEQNUM_MAX) {
		count = -EINVAL;
		goto end;
	}

	if (snd_efw_transaction_cmd(efw->unit, buf, count) < 0)
		count = -EIO;
end:
	kfree(buf);
	return count;
}

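/*
 * Readable when a lock-status change or a queued response is pending; the
 * device is always writable.
 */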
static __poll_t
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
	struct snd_efw *efw = hwdep->private_data;
	__poll_t events;

	poll_wait(file, &efw->hwdep_wait, wait);

	spin_lock_irq(&efw->lock);
	if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
		events = EPOLLIN | EPOLLRDNORM;
	else
		events = 0;
	spin_unlock_irq(&efw->lock);

	return events | EPOLLOUT;
}

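/*
 * Fill struct snd_firewire_get_info with the card index, the GUID taken from
 * the config ROM and the device name, for SNDRV_FIREWIRE_IOCTL_GET_INFO.
 */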
static int
hwdep_get_info(struct snd_efw *efw, void __user *arg)
{
	struct fw_device *dev = fw_parent_device(efw->unit);
	struct snd_firewire_get_info info;

	memset(&info, 0, sizeof(info));
	info.type = SNDRV_FIREWIRE_TYPE_FIREWORKS;
	info.card = dev->card->index;
	*(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
	*(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
	strlcpy(info.device_name, dev_name(&dev->device),
		sizeof(info.device_name));

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

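/*
 * Take the advisory device lock on behalf of user space; fails with -EBUSY
 * if streaming is running or the lock is already held.
 */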
static int
hwdep_lock(struct snd_efw *efw)
{
	int err;

	spin_lock_irq(&efw->lock);

	if (efw->dev_lock_count == 0) {
		efw->dev_lock_count = -1;
		err = 0;
	} else {
		err = -EBUSY;
	}

	spin_unlock_irq(&efw->lock);

	return err;
}

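/*
 * Release the user-space device lock; fails with -EBADFD if it is not held.
 */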
static int
hwdep_unlock(struct snd_efw *efw)
{
	int err;

	spin_lock_irq(&efw->lock);

	if (efw->dev_lock_count == -1) {
		efw->dev_lock_count = 0;
		err = 0;
	} else {
		err = -EBADFD;
	}

	spin_unlock_irq(&efw->lock);

	return err;
}

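/* Drop the user-space device lock, if held, when the hwdep node is closed. */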
static int
hwdep_release(struct snd_hwdep *hwdep, struct file *file)
{
	struct snd_efw *efw = hwdep->private_data;

	spin_lock_irq(&efw->lock);
	if (efw->dev_lock_count == -1)
		efw->dev_lock_count = 0;
	spin_unlock_irq(&efw->lock);

	return 0;
}

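/* Dispatch the ALSA FireWire hwdep ioctls. */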
static int
hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
	    unsigned int cmd, unsigned long arg)
{
	struct snd_efw *efw = hwdep->private_data;

	switch (cmd) {
	case SNDRV_FIREWIRE_IOCTL_GET_INFO:
		return hwdep_get_info(efw, (void __user *)arg);
	case SNDRV_FIREWIRE_IOCTL_LOCK:
		return hwdep_lock(efw);
	case SNDRV_FIREWIRE_IOCTL_UNLOCK:
		return hwdep_unlock(efw);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static int
hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	return hwdep_ioctl(hwdep, file, cmd,
			   (unsigned long)compat_ptr(arg));
}
#else
#define hwdep_compat_ioctl NULL
#endif

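/*
 * Create the exclusive ALSA hwdep device for this Fireworks unit and register
 * the operations above.
 */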
int snd_efw_create_hwdep_device(struct snd_efw *efw)
{
	static const struct snd_hwdep_ops ops = {
		.read		= hwdep_read,
		.write		= hwdep_write,
		.release	= hwdep_release,
		.poll		= hwdep_poll,
		.ioctl		= hwdep_ioctl,
		.ioctl_compat	= hwdep_compat_ioctl,
	};
	struct snd_hwdep *hwdep;
	int err;

	err = snd_hwdep_new(efw->card, "Fireworks", 0, &hwdep);
	if (err < 0)
		goto end;
	strcpy(hwdep->name, "Fireworks");
	hwdep->iface = SNDRV_HWDEP_IFACE_FW_FIREWORKS;
	hwdep->ops = ops;
	hwdep->private_data = efw;
	hwdep->exclusive = true;
end:
	return err;
}