// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"

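/*
 * Reset the mediated device: quiesce the subchannel so that no I/O is
 * running and no interrupt is pending, then re-enable it so the device
 * starts over from a clean IDLE state.
 */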
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions that need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}

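/*
 * IOMMU notifier callback: when a DMA unmap hits an iova that is still
 * pinned for the current channel program, reset the device and free
 * the channel program so that its pages get unpinned.
 */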
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

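/*
 * sysfs attributes describing the single supported mdev type ("io"):
 * a human-readable name, the device API string, and the number of
 * instances that can still be created.
 */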
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};

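/*
 * Create callback: claim an available instance for this subchannel and
 * associate the new mdev with its private state.
 */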
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	return 0;
}

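/*
 * Remove callback: quiesce the subchannel if it is still operational,
 * free the channel program and return the instance to the pool.
 */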
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}

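/*
 * Open callback: register the IOMMU unmap notifier and the optional
 * async, schib and CRW device regions.  On failure everything that was
 * registered so far is torn down again.
 */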
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &private->nb);
	if (ret)
		return ret;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_schib_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_crw_dev_regions(private);
	if (ret)
		goto out_unregister;

	return ret;

out_unregister:
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
	return ret;
}

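/*
 * Release callback: reset the device if it is still operational, free
 * the channel program, and unregister the device regions and the IOMMU
 * notifier set up in the open callback.
 */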
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
}

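/*
 * Copy (part of) the I/O region to userspace, serialized against other
 * users of the region via io_mutex.
 */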
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}

static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}

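/*
 * Copy an I/O request from userspace into the I/O region and feed it
 * to the FSM.  io_mutex is only trylocked: if a request is already
 * being processed, return -EAGAIN instead of blocking.
 */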
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}

static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}

static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
					 struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}

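/*
 * Fill in region info for VFIO_DEVICE_GET_REGION_INFO.  The config
 * region is described directly; all additional device regions are
 * reported via a capability chain appended to the user buffer.
 */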
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);

	}
	}
	return 0;
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	switch (info->index) {
	case VFIO_CCW_IO_IRQ_INDEX:
	case VFIO_CCW_CRW_IRQ_INDEX:
		info->count = 1;
		info->flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

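/*
 * Set up, signal or clear the eventfd trigger for the I/O or CRW
 * notification interrupt, depending on the data type passed with
 * VFIO_DEVICE_SET_IRQS.
 */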
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  uint32_t index,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	switch (index) {
	case VFIO_CCW_IO_IRQ_INDEX:
		ctx = &private->io_trigger;
		break;
	case VFIO_CCW_CRW_IRQ_INDEX:
		ctx = &private->crw_trigger;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

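/*
 * Register an additional device region of the given subtype, growing
 * the region array by one entry.  The caller supplies the region ops,
 * size, flags and private data.
 */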
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}

void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
	int i;

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);
	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}

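/*
 * ioctl dispatcher: validate the user-supplied argument sizes for the
 * supported VFIO ioctls and hand off to the corresponding helpers.
 */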
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info, mdev);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}

static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner = THIS_MODULE,
	.supported_type_groups = mdev_type_groups,
	.create = vfio_ccw_mdev_create,
	.remove = vfio_ccw_mdev_remove,
	.open = vfio_ccw_mdev_open,
	.release = vfio_ccw_mdev_release,
	.read = vfio_ccw_mdev_read,
	.write = vfio_ccw_mdev_write,
	.ioctl = vfio_ccw_mdev_ioctl,
};

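/*
 * Register/unregister the subchannel as an mdev parent device, making
 * the "io" mdev type available for it.
 */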
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}