// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_XARRAY_ALLOC(uacce_xa);

/*
 * If the parent driver or the device disappears, the queue state is invalid
 * and ops are not usable anymore.
 */
static bool uacce_queue_is_valid(struct uacce_queue *q)
{
	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
}

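/*
 * Queue lifecycle, as implemented by the handlers below (a summary sketch
 * for readers, derived from this file):
 *
 *   uacce_fops_open()                     -> UACCE_Q_INIT
 *   ioctl(UACCE_CMD_START_Q)              -> UACCE_Q_STARTED
 *   ioctl(UACCE_CMD_PUT_Q), release or
 *   uacce_remove()                        -> UACCE_Q_ZOMBIE
 *
 * A zombie queue keeps its memory until the final fput(); every fop except
 * release bails out early via uacce_queue_is_valid().
 */
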
static int uacce_start_queue(struct uacce_queue *q)
{
	int ret;

	if (q->state != UACCE_Q_INIT)
		return -EINVAL;

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			return ret;
	}

	q->state = UACCE_Q_STARTED;
	return 0;
}

static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	     uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;

	return 0;
}

static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	long ret = -ENXIO;

	/*
	 * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
	 * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
	 * gets called with mmap_lock held, by taking uacce->mutex instead of
	 * q->mutex. Doing this in uacce_fops_mmap() is not possible because
	 * uacce_fops_open() calls iommu_sva_bind_device(), which takes
	 * mmap_lock, while holding uacce->mutex.
	 */
	mutex_lock(&uacce->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		ret = uacce_start_queue(q);
		break;
	case UACCE_CMD_PUT_Q:
		ret = uacce_put_queue(q);
		break;
	default:
		if (uacce->ops->ioctl)
			ret = uacce->ops->ioctl(q, cmd, arg);
		else
			ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&uacce->mutex);
	return ret;
}

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif

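/*
 * When UACCE_DEV_SVA was negotiated, bind the queue to the owning process's
 * mm and record the PASID, so the parent device can DMA directly within the
 * process address space. Without SVA this is a no-op and q->pasid stays 0
 * (from kzalloc()).
 */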
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	u32 pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}

static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;
	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}

static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret = 0;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	mutex_lock(&uacce->mutex);

	if (!uacce->parent) {
		ret = -EINVAL;
		goto out_with_mem;
	}

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;
	mutex_init(&q->mutex);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->mutex);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	mutex_unlock(&uacce->mutex);
	return ret;
}

static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce->mutex);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	kfree(q);

	return 0;
}

static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;

	if (vma->vm_pgoff < UACCE_MAX_REGION) {
		struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];

		/*
		 * Clear the stale slot under q->mutex, so the region can be
		 * mapped again later. Freeing qfr while leaving the pointer
		 * in q->qfrs would make any further mmap() of this region
		 * fail with -EEXIST and leave a dangling pointer behind.
		 */
		mutex_lock(&q->mutex);
		q->qfrs[vma->vm_pgoff] = NULL;
		mutex_unlock(&q->mutex);
		kfree(qfr);
	}
}

static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,
};

static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q)) {
		ret = -ENXIO;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&q->mutex);

	return ret;

out_with_lock:
	mutex_unlock(&q->mutex);
	kfree(qfr);
	return ret;
}

static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;
	__poll_t ret = 0;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	poll_wait(file, &q->wait, wait);

	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		ret = EPOLLIN | EPOLLRDNORM;

out_unlock:
	mutex_unlock(&q->mutex);
	return ret;
}

static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

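/*
 * Minimal user-space usage sketch (illustrative only: the device node name
 * is an example, error handling is omitted, and the region sizes would be
 * read from the sysfs attributes below):
 *
 *	int fd = open("/dev/hisi_zip-0", O_RDWR);
 *
 *	ioctl(fd, UACCE_CMD_START_Q);
 *
 *	// The mmap page offset selects the region type:
 *	// UACCE_QFRT_MMIO (0) or UACCE_QFRT_DUS (1).
 *	void *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, UACCE_QFRT_MMIO * sysconf(_SC_PAGESIZE));
 *	void *dus  = mmap(NULL, dus_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, UACCE_QFRT_DUS * sysconf(_SC_PAGESIZE));
 *
 *	// ... submit work through the shared regions; poll(fd) works if the
 *	// parent driver implements is_q_updated() ...
 *
 *	ioctl(fd, UACCE_CMD_PUT_Q);
 *	close(fd);
 */
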
#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sprintf(buf, "%d\n",
		       uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);

static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	NULL,
};

static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

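/*
 * The attributes above are exposed under /sys/class/uacce/<name>/, where
 * <name> is the "%s-%d" string set in uacce_alloc() below. region_mmio_size
 * and region_dus_size are hidden when the parent driver reserved no pages
 * for the region; user space typically reads them before calling mmap().
 */
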
static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

/**
 * uacce_alloc() - allocate an accelerator
 * @parent: pointer to the uacce parent device
 * @interface: pointer to the uacce_interface to register
 *
 * Return: a uacce pointer on success, or an ERR_PTR() on failure. The caller
 * must check the negotiated uacce->flags, since requested features (such as
 * UACCE_DEV_SVA) may have been dropped.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	if (flags & UACCE_DEV_SVA) {
		ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
		if (ret)
			flags &= ~UACCE_DEV_SVA;
	}

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->mutex);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	if (flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);

/**
 * uacce_register() - add the accelerator to cdev and export to user space
 * @uacce: the initialized uacce device
 *
 * Return: 0 if registration succeeded, or an error code.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);

/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;
	/*
	 * Unmap any remaining user-space mappings, so that users cannot keep
	 * accessing the mmapped areas after the parent device has been
	 * removed.
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/*
	 * uacce_fops_open() may be running concurrently, even after we remove
	 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
	 * removed uacce device.
	 */
	mutex_lock(&uacce->mutex);
	/* ensure no open queue remains */
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		/*
		 * Taking q->mutex ensures that fops do not use the defunct
		 * uacce->ops after the queue is disabled.
		 */
		mutex_lock(&q->mutex);
		uacce_put_queue(q);
		mutex_unlock(&q->mutex);
		uacce_unbind_queue(q);
	}

	/* disable SVA now, since no queue remains open */
	if (uacce->flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	/*
	 * uacce exists as long as there are open fds, but ops will be freed
	 * now. Ensure that bugs cause a NULL dereference rather than a
	 * use-after-free.
	 */
	uacce->ops = NULL;
	uacce->parent = NULL;
	mutex_unlock(&uacce->mutex);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);

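/*
 * Parent-driver usage sketch (illustrative; the "foo" names are hypothetical
 * and error handling is abbreviated):
 *
 *	struct uacce_interface iface = {
 *		.name	= "foo",
 *		.flags	= UACCE_DEV_SVA,
 *		.ops	= &foo_uacce_ops,
 *	};
 *	struct uacce_device *uacce;
 *
 *	uacce = uacce_alloc(parent_dev, &iface);
 *	if (IS_ERR(uacce))
 *		return PTR_ERR(uacce);
 *
 *	// SVA may have been dropped during negotiation in uacce_alloc():
 *	if (!(uacce->flags & UACCE_DEV_SVA))
 *		dev_info(parent_dev, "running without SVA\n");
 *
 *	ret = uacce_register(uacce);
 *	if (ret)
 *		uacce_remove(uacce);
 *	...
 *	uacce_remove(uacce);	// on teardown
 */
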
static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");