1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright 2013-2016 Freescale Semiconductor Inc.
4*4882a593Smuzhiyun * Copyright 2016-2017,2019-2020 NXP
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/device.h>
8*4882a593Smuzhiyun #include <linux/iommu.h>
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/mutex.h>
11*4882a593Smuzhiyun #include <linux/slab.h>
12*4882a593Smuzhiyun #include <linux/types.h>
13*4882a593Smuzhiyun #include <linux/vfio.h>
14*4882a593Smuzhiyun #include <linux/fsl/mc.h>
15*4882a593Smuzhiyun #include <linux/delay.h>
16*4882a593Smuzhiyun #include <linux/io-64-nonatomic-hi-lo.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include "vfio_fsl_mc_private.h"
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun static struct fsl_mc_driver vfio_fsl_mc_driver;
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun static DEFINE_MUTEX(reflck_lock);
23*4882a593Smuzhiyun
vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck * reflck)24*4882a593Smuzhiyun static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun kref_get(&reflck->kref);
27*4882a593Smuzhiyun }
28*4882a593Smuzhiyun
vfio_fsl_mc_reflck_release(struct kref * kref)29*4882a593Smuzhiyun static void vfio_fsl_mc_reflck_release(struct kref *kref)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun struct vfio_fsl_mc_reflck *reflck = container_of(kref,
32*4882a593Smuzhiyun struct vfio_fsl_mc_reflck,
33*4882a593Smuzhiyun kref);
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun mutex_destroy(&reflck->lock);
36*4882a593Smuzhiyun kfree(reflck);
37*4882a593Smuzhiyun mutex_unlock(&reflck_lock);
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun
vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck * reflck)40*4882a593Smuzhiyun static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun
vfio_fsl_mc_reflck_alloc(void)45*4882a593Smuzhiyun static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun struct vfio_fsl_mc_reflck *reflck;
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
50*4882a593Smuzhiyun if (!reflck)
51*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun kref_init(&reflck->kref);
54*4882a593Smuzhiyun mutex_init(&reflck->lock);
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun return reflck;
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun
/*
 * Attach vdev to a reflck (a lock/refcount shared by a DPRC container and
 * all of its child objects).
 *
 * A DPRC device allocates a fresh reflck; a child object looks up its
 * parent container's vfio device and shares that reflck instead.  The
 * global reflck_lock is held across the whole operation so the lookup and
 * the kref_get cannot race with a concurrent final kref_put (which also
 * takes reflck_lock via kref_put_mutex()).
 *
 * Returns 0 on success, -ENODEV if the parent container is not (yet)
 * bound to vfio, or a PTR_ERR from the allocation.
 */
static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
{
	int ret = 0;

	mutex_lock(&reflck_lock);
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		/* Container object: create the shared reflck. */
		vdev->reflck = vfio_fsl_mc_reflck_alloc();
		ret = PTR_ERR_OR_ZERO(vdev->reflck);
	} else {
		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
		struct vfio_device *device;
		struct vfio_fsl_mc_device *cont_vdev;

		/* Child object: borrow the parent container's reflck. */
		device = vfio_device_get_from_dev(mc_cont_dev);
		if (!device) {
			ret = -ENODEV;
			goto unlock;
		}

		cont_vdev = vfio_device_data(device);
		if (!cont_vdev || !cont_vdev->reflck) {
			vfio_device_put(device);
			ret = -ENODEV;
			goto unlock;
		}
		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
		vdev->reflck = cont_vdev->reflck;
		vfio_device_put(device);
	}

unlock:
	mutex_unlock(&reflck_lock);
	return ret;
}
93*4882a593Smuzhiyun
vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device * vdev)94*4882a593Smuzhiyun static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = vdev->mc_dev;
97*4882a593Smuzhiyun int count = mc_dev->obj_desc.region_count;
98*4882a593Smuzhiyun int i;
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
101*4882a593Smuzhiyun GFP_KERNEL);
102*4882a593Smuzhiyun if (!vdev->regions)
103*4882a593Smuzhiyun return -ENOMEM;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun for (i = 0; i < count; i++) {
106*4882a593Smuzhiyun struct resource *res = &mc_dev->regions[i];
107*4882a593Smuzhiyun int no_mmap = is_fsl_mc_bus_dprc(mc_dev);
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun vdev->regions[i].addr = res->start;
110*4882a593Smuzhiyun vdev->regions[i].size = resource_size(res);
111*4882a593Smuzhiyun vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
112*4882a593Smuzhiyun /*
113*4882a593Smuzhiyun * Only regions addressed with PAGE granularity may be
114*4882a593Smuzhiyun * MMAPed securely.
115*4882a593Smuzhiyun */
116*4882a593Smuzhiyun if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
117*4882a593Smuzhiyun !(vdev->regions[i].size & ~PAGE_MASK))
118*4882a593Smuzhiyun vdev->regions[i].flags |=
119*4882a593Smuzhiyun VFIO_REGION_INFO_FLAG_MMAP;
120*4882a593Smuzhiyun vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
121*4882a593Smuzhiyun if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
122*4882a593Smuzhiyun vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun return 0;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device * vdev)128*4882a593Smuzhiyun static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = vdev->mc_dev;
131*4882a593Smuzhiyun int i;
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun for (i = 0; i < mc_dev->obj_desc.region_count; i++)
134*4882a593Smuzhiyun iounmap(vdev->regions[i].ioaddr);
135*4882a593Smuzhiyun kfree(vdev->regions);
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun
vfio_fsl_mc_open(void * device_data)138*4882a593Smuzhiyun static int vfio_fsl_mc_open(void *device_data)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun struct vfio_fsl_mc_device *vdev = device_data;
141*4882a593Smuzhiyun int ret;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun if (!try_module_get(THIS_MODULE))
144*4882a593Smuzhiyun return -ENODEV;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun mutex_lock(&vdev->reflck->lock);
147*4882a593Smuzhiyun if (!vdev->refcnt) {
148*4882a593Smuzhiyun ret = vfio_fsl_mc_regions_init(vdev);
149*4882a593Smuzhiyun if (ret)
150*4882a593Smuzhiyun goto err_reg_init;
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun vdev->refcnt++;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun mutex_unlock(&vdev->reflck->lock);
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun return 0;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun err_reg_init:
159*4882a593Smuzhiyun mutex_unlock(&vdev->reflck->lock);
160*4882a593Smuzhiyun module_put(THIS_MODULE);
161*4882a593Smuzhiyun return ret;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun
vfio_fsl_mc_release(void * device_data)164*4882a593Smuzhiyun static void vfio_fsl_mc_release(void *device_data)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun struct vfio_fsl_mc_device *vdev = device_data;
167*4882a593Smuzhiyun int ret;
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun mutex_lock(&vdev->reflck->lock);
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun if (!(--vdev->refcnt)) {
172*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = vdev->mc_dev;
173*4882a593Smuzhiyun struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
174*4882a593Smuzhiyun struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun vfio_fsl_mc_regions_cleanup(vdev);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /* reset the device before cleaning up the interrupts */
179*4882a593Smuzhiyun ret = dprc_reset_container(mc_cont->mc_io, 0,
180*4882a593Smuzhiyun mc_cont->mc_handle,
181*4882a593Smuzhiyun mc_cont->obj_desc.id,
182*4882a593Smuzhiyun DPRC_RESET_OPTION_NON_RECURSIVE);
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun if (ret) {
185*4882a593Smuzhiyun dev_warn(&mc_cont->dev, "VFIO_FLS_MC: reset device has failed (%d)\n",
186*4882a593Smuzhiyun ret);
187*4882a593Smuzhiyun WARN_ON(1);
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun vfio_fsl_mc_irqs_cleanup(vdev);
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun fsl_mc_cleanup_irq_pool(mc_cont);
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun mutex_unlock(&vdev->reflck->lock);
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun module_put(THIS_MODULE);
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun
/*
 * vfio device ioctl handler.
 *
 * Implements the standard VFIO device ioctls: device info, per-region
 * info, per-IRQ info, interrupt (eventfd) configuration, and device
 * reset.  Reset is only honoured for DPRC container objects.  Each
 * handler copies only the fixed "minsz" prefix of the uapi struct in and
 * out, per the VFIO argsz convention.
 */
static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
			      unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev = device_data;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		/* Only the DPRC container supports VFIO_DEVICE_RESET. */
		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		/* Every fsl-mc IRQ index is a single eventfd-capable IRQ. */
		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		/* Validates flags/index/start/count and sizes the payload. */
		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
					mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
				   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		/* igate serializes interrupt (re)configuration. */
		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		int ret;
		struct fsl_mc_device *mc_dev = vdev->mc_dev;

		/* reset is supported only for the DPRC */
		if (!is_fsl_mc_bus_dprc(mc_dev))
			return -ENOTTY;

		ret = dprc_reset_container(mc_dev->mc_io, 0,
					   mc_dev->mc_handle,
					   mc_dev->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		return ret;

	}
	default:
		return -ENOTTY;
	}
}
329*4882a593Smuzhiyun
vfio_fsl_mc_read(void * device_data,char __user * buf,size_t count,loff_t * ppos)330*4882a593Smuzhiyun static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
331*4882a593Smuzhiyun size_t count, loff_t *ppos)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun struct vfio_fsl_mc_device *vdev = device_data;
334*4882a593Smuzhiyun unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
335*4882a593Smuzhiyun loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
336*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = vdev->mc_dev;
337*4882a593Smuzhiyun struct vfio_fsl_mc_region *region;
338*4882a593Smuzhiyun u64 data[8];
339*4882a593Smuzhiyun int i;
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun if (index >= mc_dev->obj_desc.region_count)
342*4882a593Smuzhiyun return -EINVAL;
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun region = &vdev->regions[index];
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
347*4882a593Smuzhiyun return -EINVAL;
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun if (!region->ioaddr) {
350*4882a593Smuzhiyun region->ioaddr = ioremap(region->addr, region->size);
351*4882a593Smuzhiyun if (!region->ioaddr)
352*4882a593Smuzhiyun return -ENOMEM;
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun if (count != 64 || off != 0)
356*4882a593Smuzhiyun return -EINVAL;
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun for (i = 7; i >= 0; i--)
359*4882a593Smuzhiyun data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun if (copy_to_user(buf, data, 64))
362*4882a593Smuzhiyun return -EFAULT;
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun return count;
365*4882a593Smuzhiyun }
366*4882a593Smuzhiyun
367*4882a593Smuzhiyun #define MC_CMD_COMPLETION_TIMEOUT_MS 5000
368*4882a593Smuzhiyun #define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
369*4882a593Smuzhiyun
/*
 * Write a 64-byte command into the MC portal and poll for completion.
 *
 * The seven parameter words are written first (relaxed ordering is fine
 * among themselves); the header word at offset 0 is written last with a
 * full-barrier writeq(), since writing the header is what triggers the MC
 * firmware to consume the command.  Completion is detected by polling the
 * header's status field until it leaves MC_CMD_STATUS_READY.
 *
 * Returns 0 on completion or -ETIMEDOUT after ~5 s of polling.
 */
static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;

	/* Write command parameter words into the portal (header excluded). */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));

	/* Write the command header last; this submits the command. */
	writeq(cmd_data[0], ioaddr);

	/* Wait for response before returning to user-space
	 * This can be optimized in future to even prepare response
	 * before returning to user-space and avoid read ioctl.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = cpu_to_le64(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}
406*4882a593Smuzhiyun
vfio_fsl_mc_write(void * device_data,const char __user * buf,size_t count,loff_t * ppos)407*4882a593Smuzhiyun static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
408*4882a593Smuzhiyun size_t count, loff_t *ppos)
409*4882a593Smuzhiyun {
410*4882a593Smuzhiyun struct vfio_fsl_mc_device *vdev = device_data;
411*4882a593Smuzhiyun unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
412*4882a593Smuzhiyun loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
413*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = vdev->mc_dev;
414*4882a593Smuzhiyun struct vfio_fsl_mc_region *region;
415*4882a593Smuzhiyun u64 data[8];
416*4882a593Smuzhiyun int ret;
417*4882a593Smuzhiyun
418*4882a593Smuzhiyun if (index >= mc_dev->obj_desc.region_count)
419*4882a593Smuzhiyun return -EINVAL;
420*4882a593Smuzhiyun
421*4882a593Smuzhiyun region = &vdev->regions[index];
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
424*4882a593Smuzhiyun return -EINVAL;
425*4882a593Smuzhiyun
426*4882a593Smuzhiyun if (!region->ioaddr) {
427*4882a593Smuzhiyun region->ioaddr = ioremap(region->addr, region->size);
428*4882a593Smuzhiyun if (!region->ioaddr)
429*4882a593Smuzhiyun return -ENOMEM;
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun if (count != 64 || off != 0)
433*4882a593Smuzhiyun return -EINVAL;
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun if (copy_from_user(&data, buf, 64))
436*4882a593Smuzhiyun return -EFAULT;
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun ret = vfio_fsl_mc_send_command(region->ioaddr, data);
439*4882a593Smuzhiyun if (ret)
440*4882a593Smuzhiyun return ret;
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun return count;
443*4882a593Smuzhiyun
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun
vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,struct vm_area_struct * vma)446*4882a593Smuzhiyun static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
447*4882a593Smuzhiyun struct vm_area_struct *vma)
448*4882a593Smuzhiyun {
449*4882a593Smuzhiyun u64 size = vma->vm_end - vma->vm_start;
450*4882a593Smuzhiyun u64 pgoff, base;
451*4882a593Smuzhiyun u8 region_cacheable;
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun pgoff = vma->vm_pgoff &
454*4882a593Smuzhiyun ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
455*4882a593Smuzhiyun base = pgoff << PAGE_SHIFT;
456*4882a593Smuzhiyun
457*4882a593Smuzhiyun if (region.size < PAGE_SIZE || base + size > region.size)
458*4882a593Smuzhiyun return -EINVAL;
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
461*4882a593Smuzhiyun (region.type & FSL_MC_REGION_SHAREABLE);
462*4882a593Smuzhiyun if (!region_cacheable)
463*4882a593Smuzhiyun vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
464*4882a593Smuzhiyun
465*4882a593Smuzhiyun vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
466*4882a593Smuzhiyun
467*4882a593Smuzhiyun return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
468*4882a593Smuzhiyun size, vma->vm_page_prot);
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun
vfio_fsl_mc_mmap(void * device_data,struct vm_area_struct * vma)471*4882a593Smuzhiyun static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
472*4882a593Smuzhiyun {
473*4882a593Smuzhiyun struct vfio_fsl_mc_device *vdev = device_data;
474*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = vdev->mc_dev;
475*4882a593Smuzhiyun unsigned int index;
476*4882a593Smuzhiyun
477*4882a593Smuzhiyun index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun if (vma->vm_end < vma->vm_start)
480*4882a593Smuzhiyun return -EINVAL;
481*4882a593Smuzhiyun if (vma->vm_start & ~PAGE_MASK)
482*4882a593Smuzhiyun return -EINVAL;
483*4882a593Smuzhiyun if (vma->vm_end & ~PAGE_MASK)
484*4882a593Smuzhiyun return -EINVAL;
485*4882a593Smuzhiyun if (!(vma->vm_flags & VM_SHARED))
486*4882a593Smuzhiyun return -EINVAL;
487*4882a593Smuzhiyun if (index >= mc_dev->obj_desc.region_count)
488*4882a593Smuzhiyun return -EINVAL;
489*4882a593Smuzhiyun
490*4882a593Smuzhiyun if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
491*4882a593Smuzhiyun return -EINVAL;
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
494*4882a593Smuzhiyun && (vma->vm_flags & VM_READ))
495*4882a593Smuzhiyun return -EINVAL;
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
498*4882a593Smuzhiyun && (vma->vm_flags & VM_WRITE))
499*4882a593Smuzhiyun return -EINVAL;
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun vma->vm_private_data = mc_dev;
502*4882a593Smuzhiyun
503*4882a593Smuzhiyun return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
504*4882a593Smuzhiyun }
505*4882a593Smuzhiyun
/* vfio callbacks for fsl-mc devices, registered via vfio_add_group_dev(). */
static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name		= "vfio-fsl-mc",
	.open		= vfio_fsl_mc_open,
	.release	= vfio_fsl_mc_release,
	.ioctl		= vfio_fsl_mc_ioctl,
	.read		= vfio_fsl_mc_read,
	.write		= vfio_fsl_mc_write,
	.mmap		= vfio_fsl_mc_mmap,
};
515*4882a593Smuzhiyun
vfio_fsl_mc_bus_notifier(struct notifier_block * nb,unsigned long action,void * data)516*4882a593Smuzhiyun static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
517*4882a593Smuzhiyun unsigned long action, void *data)
518*4882a593Smuzhiyun {
519*4882a593Smuzhiyun struct vfio_fsl_mc_device *vdev = container_of(nb,
520*4882a593Smuzhiyun struct vfio_fsl_mc_device, nb);
521*4882a593Smuzhiyun struct device *dev = data;
522*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
523*4882a593Smuzhiyun struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun if (action == BUS_NOTIFY_ADD_DEVICE &&
526*4882a593Smuzhiyun vdev->mc_dev == mc_cont) {
527*4882a593Smuzhiyun mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
528*4882a593Smuzhiyun vfio_fsl_mc_ops.name);
529*4882a593Smuzhiyun if (!mc_dev->driver_override)
530*4882a593Smuzhiyun dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
531*4882a593Smuzhiyun dev_name(&mc_cont->dev));
532*4882a593Smuzhiyun else
533*4882a593Smuzhiyun dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
534*4882a593Smuzhiyun dev_name(&mc_cont->dev));
535*4882a593Smuzhiyun } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
536*4882a593Smuzhiyun vdev->mc_dev == mc_cont) {
537*4882a593Smuzhiyun struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
540*4882a593Smuzhiyun dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
541*4882a593Smuzhiyun dev_name(dev), mc_drv->driver.name);
542*4882a593Smuzhiyun }
543*4882a593Smuzhiyun
544*4882a593Smuzhiyun return 0;
545*4882a593Smuzhiyun }
546*4882a593Smuzhiyun
vfio_fsl_mc_init_device(struct vfio_fsl_mc_device * vdev)547*4882a593Smuzhiyun static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
548*4882a593Smuzhiyun {
549*4882a593Smuzhiyun struct fsl_mc_device *mc_dev = vdev->mc_dev;
550*4882a593Smuzhiyun int ret;
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun /* Non-dprc devices share mc_io from parent */
553*4882a593Smuzhiyun if (!is_fsl_mc_bus_dprc(mc_dev)) {
554*4882a593Smuzhiyun struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun mc_dev->mc_io = mc_cont->mc_io;
557*4882a593Smuzhiyun return 0;
558*4882a593Smuzhiyun }
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
561*4882a593Smuzhiyun ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
562*4882a593Smuzhiyun if (ret)
563*4882a593Smuzhiyun return ret;
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun /* open DPRC, allocate a MC portal */
566*4882a593Smuzhiyun ret = dprc_setup(mc_dev);
567*4882a593Smuzhiyun if (ret) {
568*4882a593Smuzhiyun dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
569*4882a593Smuzhiyun goto out_nc_unreg;
570*4882a593Smuzhiyun }
571*4882a593Smuzhiyun return 0;
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun out_nc_unreg:
574*4882a593Smuzhiyun bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
575*4882a593Smuzhiyun return ret;
576*4882a593Smuzhiyun }
577*4882a593Smuzhiyun
vfio_fsl_mc_scan_container(struct fsl_mc_device * mc_dev)578*4882a593Smuzhiyun static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
579*4882a593Smuzhiyun {
580*4882a593Smuzhiyun int ret;
581*4882a593Smuzhiyun
582*4882a593Smuzhiyun /* non dprc devices do not scan for other devices */
583*4882a593Smuzhiyun if (!is_fsl_mc_bus_dprc(mc_dev))
584*4882a593Smuzhiyun return 0;
585*4882a593Smuzhiyun ret = dprc_scan_container(mc_dev, false);
586*4882a593Smuzhiyun if (ret) {
587*4882a593Smuzhiyun dev_err(&mc_dev->dev,
588*4882a593Smuzhiyun "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
589*4882a593Smuzhiyun dprc_remove_devices(mc_dev, NULL, 0);
590*4882a593Smuzhiyun return ret;
591*4882a593Smuzhiyun }
592*4882a593Smuzhiyun return 0;
593*4882a593Smuzhiyun }
594*4882a593Smuzhiyun
/*
 * Undo vfio_fsl_mc_init_device(): close the DPRC portal and drop the bus
 * notifier.  Nothing to do for non-container objects (they only borrowed
 * the parent's mc_io).
 */
static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	if (!is_fsl_mc_bus_dprc(mc_dev))
		return;

	dprc_cleanup(mc_dev);
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}
605*4882a593Smuzhiyun
/*
 * fsl-mc bus probe: bind a DPRC container (or one of its child objects)
 * to the vfio-fsl-mc meta-driver and expose it to user space.
 *
 * Order matters: the reflck must exist before init, the vfio group
 * device must be live before the container scan (child probes re-enter
 * this function and their reflck_attach() looks up this vdev), and the
 * error ladder unwinds in exact reverse order.
 */
static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct iommu_group *group;
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	group = vfio_iommu_group_get(dev);
	if (!group) {
		dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
		return -EINVAL;
	}

	/* devm allocation: freed automatically when the device unbinds. */
	vdev = devm_kzalloc(dev, sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out_group_put;
	}

	vdev->mc_dev = mc_dev;
	mutex_init(&vdev->igate);

	ret = vfio_fsl_mc_reflck_attach(vdev);
	if (ret)
		goto out_group_put;

	ret = vfio_fsl_mc_init_device(vdev);
	if (ret)
		goto out_reflck;

	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_device;
	}

	/*
	 * This triggers recursion into vfio_fsl_mc_probe() on another device
	 * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
	 * vfio_add_group_dev() above. It has no impact on this vdev, so it is
	 * safe to be after the vfio device is made live.
	 */
	ret = vfio_fsl_mc_scan_container(mc_dev);
	if (ret)
		goto out_group_dev;
	return 0;

out_group_dev:
	vfio_del_group_dev(dev);
out_device:
	vfio_fsl_uninit_device(vdev);
out_reflck:
	vfio_fsl_mc_reflck_put(vdev->reflck);
out_group_put:
	vfio_iommu_group_put(group, dev);
	return ret;
}
663*4882a593Smuzhiyun
vfio_fsl_mc_remove(struct fsl_mc_device * mc_dev)664*4882a593Smuzhiyun static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
665*4882a593Smuzhiyun {
666*4882a593Smuzhiyun struct vfio_fsl_mc_device *vdev;
667*4882a593Smuzhiyun struct device *dev = &mc_dev->dev;
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun vdev = vfio_del_group_dev(dev);
670*4882a593Smuzhiyun if (!vdev)
671*4882a593Smuzhiyun return -EINVAL;
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun mutex_destroy(&vdev->igate);
674*4882a593Smuzhiyun
675*4882a593Smuzhiyun dprc_remove_devices(mc_dev, NULL, 0);
676*4882a593Smuzhiyun vfio_fsl_uninit_device(vdev);
677*4882a593Smuzhiyun vfio_fsl_mc_reflck_put(vdev->reflck);
678*4882a593Smuzhiyun
679*4882a593Smuzhiyun vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun return 0;
682*4882a593Smuzhiyun }
683*4882a593Smuzhiyun
/* fsl-mc bus driver registration for the vfio-fsl-mc meta-driver. */
static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe		= vfio_fsl_mc_probe,
	.remove		= vfio_fsl_mc_remove,
	.driver	= {
		.name	= "vfio-fsl-mc",
		.owner	= THIS_MODULE,
	},
};
692*4882a593Smuzhiyun
/* Module init: register the driver on the fsl-mc bus. */
static int __init vfio_fsl_mc_driver_init(void)
{
	return fsl_mc_driver_register(&vfio_fsl_mc_driver);
}
697*4882a593Smuzhiyun
/* Module exit: unregister the driver from the fsl-mc bus. */
static void __exit vfio_fsl_mc_driver_exit(void)
{
	fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
}
702*4882a593Smuzhiyun
703*4882a593Smuzhiyun module_init(vfio_fsl_mc_driver_init);
704*4882a593Smuzhiyun module_exit(vfio_fsl_mc_driver_exit);
705*4882a593Smuzhiyun
706*4882a593Smuzhiyun MODULE_LICENSE("Dual BSD/GPL");
707*4882a593Smuzhiyun MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");
708