/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

#define EDID_BLOB_OFFSET (PAGE_SIZE/2)

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct vfio_edid_region {
	struct vfio_region_gfx_edid vfio_edid_regs;
	void *edid_blob;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};

struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};

struct kvmgt_vdev {
	struct intel_vgpu *vgpu;
	struct mdev_device *mdev;
	struct vfio_region *region;
	int num_regions;
	struct eventfd_ctx *intx_trigger;
	struct eventfd_ctx *msi_trigger;

	/*
	 * Two caches are used to avoid mapping duplicated pages (e.g.
	 * scratch pages). This helps to reduce DMA setup overhead.
	 */
	struct rb_root gfn_cache;
	struct rb_root dma_addr_cache;
	unsigned long nr_cache_entries;
	struct mutex cache_lock;

	struct notifier_block iommu_notifier;
	struct notifier_block group_notifier;
	struct kvm *kvm;
	struct work_struct release_work;
	atomic_t released;
	struct vfio_device *vfio_device;
	struct vfio_group *vfio_group;
};

static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
{
	return intel_vgpu_vdev(vgpu);
}

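/*
 * A vGPU handle, when set, holds a pointer to the kvmgt_guest_info for
 * that vGPU; any value that fits in the low byte is treated as unset.
 */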
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

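/* Unpin all pages of a guest range previously pinned by gvt_pin_guest_page(). */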
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
		drm_WARN_ON(&i915->drm, ret != 1);
	}
}

/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned long base_pfn = 0;
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	/*
	 * We pin the pages one-by-one to avoid allocating a big array
	 * on the stack to hold pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;
		unsigned long pfn;

		ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
					   IOMMU_READ | IOMMU_WRITE, &pfn);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
				     cur_gfn, ret);
			goto err;
		}

		if (!pfn_valid(pfn)) {
			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
			npage++;
			ret = -EFAULT;
			goto err;
		}

		if (npage == 0)
			base_pfn = pfn;
		else if (base_pfn + npage != pfn) {
			gvt_vgpu_err("The pages are not contiguous\n");
			ret = -EINVAL;
			npage++;
			goto err;
		}
	}

	*page = pfn_to_page(base_pfn);
	return 0;
err:
	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	struct page *page = NULL;
	int ret;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
	if (ret)
		return ret;

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
			     page_to_pfn(page), ret);
		gvt_unpin_guest_page(vgpu, gfn, size);
		return -ENOMEM;
	}

	return 0;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;

	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}

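/*
 * rbtree lookups for the two mapping caches; callers in this file hold
 * vdev->cache_lock around these walks.
 */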
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

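/*
 * Allocate a gvt_dma entry and link it into both rbtrees, so a cached
 * mapping can be found either by guest frame number or by DMA address.
 */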
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vdev->gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vdev->gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vdev->dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);

	vdev->nr_cache_entries++;
	return 0;
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	rb_erase(&entry->gfn_node, &vdev->gfn_cache);
	rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
	kfree(entry);
	vdev->nr_cache_entries--;
}

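/*
 * Drain the whole cache one entry at a time, unmapping and unpinning as
 * we go; cache_lock is dropped and re-taken between iterations.
 */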
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	for (;;) {
		mutex_lock(&vdev->cache_lock);
		node = rb_first(&vdev->gfn_cache);
		if (!node) {
			mutex_unlock(&vdev->cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vdev->cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	vdev->gfn_cache = RB_ROOT;
	vdev->dma_addr_cache = RB_ROOT;
	vdev->nr_cache_entries = 0;
	mutex_init(&vdev->cache_lock);
}

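/*
 * ptable is a hash, keyed by gfn, of the guest frames that have been
 * write-protected on behalf of this guest.
 */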
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

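/*
 * The OpRegion is exposed to userspace as a read-only device-specific
 * region; writes and out-of-range offsets are rejected.
 */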
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vdev->region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};

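/*
 * Emulate accesses to the vfio_region_gfx_edid control registers. Only
 * 4-byte accesses are accepted; writing link_state triggers a hotplug
 * event, writing edid_size is checked against edid_max_size, and all
 * other registers are read-only.
 */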
static int handle_edid_regs(struct intel_vgpu *vgpu,
			struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
	unsigned int data;

	if (offset + count > sizeof(*regs))
		return -EINVAL;

	if (count != 4)
		return -EINVAL;

	if (is_write) {
		data = *((unsigned int *)buf);
		switch (offset) {
		case offsetof(struct vfio_region_gfx_edid, link_state):
			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
				if (!drm_edid_block_valid(
					(u8 *)region->edid_blob,
					0,
					true,
					NULL)) {
					gvt_vgpu_err("invalid EDID blob\n");
					return -EINVAL;
				}
				intel_gvt_ops->emulate_hotplug(vgpu, true);
			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
				intel_gvt_ops->emulate_hotplug(vgpu, false);
			else {
				gvt_vgpu_err("invalid EDID link state %d\n",
					regs->link_state);
				return -EINVAL;
			}
			regs->link_state = data;
			break;
		case offsetof(struct vfio_region_gfx_edid, edid_size):
			if (data > regs->edid_max_size) {
				gvt_vgpu_err("EDID size is bigger than %d!\n",
					regs->edid_max_size);
				return -EINVAL;
			}
			regs->edid_size = data;
			break;
		default:
			/* read-only regs */
			gvt_vgpu_err("write read-only EDID region at offset %d\n",
				offset);
			return -EPERM;
		}
	} else {
		memcpy(buf, (char *)regs + offset, count);
	}

	return count;
}

static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	if (offset + count > region->vfio_edid_regs.edid_size)
		return -EINVAL;

	if (is_write)
		memcpy(region->edid_blob + offset, buf, count);
	else
		memcpy(buf, region->edid_blob + offset, count);

	return count;
}

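/*
 * Dispatch an EDID region access: offsets below the blob offset hit the
 * control registers, everything past it reads or writes the EDID blob.
 */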
static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	struct vfio_edid_region *region =
		(struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos < region->vfio_edid_regs.edid_offset) {
		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
	} else {
		pos -= EDID_BLOB_OFFSET;
		ret = handle_edid_blob(region, buf, count, pos, iswrite);
	}

	if (ret < 0)
		gvt_vgpu_err("failed to access EDID region\n");

	return ret;
}

static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
					struct vfio_region *region)
{
	kfree(region->data);
}

static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
	.rw = intel_vgpu_reg_rw_edid,
	.release = intel_vgpu_reg_release_edid,
};

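/* Grow the region array by one and append a new device-specific region. */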
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct vfio_region *region;

	region = krealloc(vdev->region,
			(vdev->num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;
	vdev->num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	vdev->vfio_device = vfio_device_get_from_dev(
		mdev_dev(vdev->mdev));
	if (!vdev->vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/*
	 * Each vgpu has its own opregion, although VFIO would create another
	 * one later. This one is used to expose the opregion to VFIO, while
	 * the one VFIO creates later is what the guest actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

static int kvmgt_set_edid(void *p_vgpu, int port_num)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct vfio_edid_region *base;
	int ret;

	base = kzalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* TODO: Add multi-port and EDID extension block support */
	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
	base->vfio_edid_regs.edid_size = EDID_SIZE;
	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
	base->edid_blob = port->edid->edid_block;

	ret = intel_vgpu_register_reg(vgpu,
			VFIO_REGION_TYPE_GFX,
			VFIO_REGION_SUBTYPE_GFX_EDID,
			&intel_vgpu_regops_edid, EDID_SIZE,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_CAPS, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);

	if (WARN_ON(!vdev->vfio_device))
		return;

	vfio_device_put(vdev->vfio_device);
}

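/*
 * mdev "create" callback: look up the requested vGPU type from the type
 * kobject's name and instantiate a vGPU of that type for this mdev.
 */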
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);

	kvmgt_vdev(vgpu)->mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

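/*
 * VFIO IOMMU notifier: when userspace unmaps an IOVA range, drop every
 * cached DMA mapping inside that range so the host pages get unpinned.
 */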
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kvmgt_vdev *vdev = container_of(nb,
					       struct kvmgt_vdev,
					       iommu_notifier);
	struct intel_vgpu *vgpu = vdev->vgpu;

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vdev->cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
			if (!entry)
				continue;

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
					   entry->size);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vdev->cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kvmgt_vdev *vdev = container_of(nb,
					       struct kvmgt_vdev,
					       group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vdev->kvm = data;

		if (!data)
			schedule_work(&vdev->release_work);
	}

	return NOTIFY_OK;
}

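/*
 * mdev "open" callback: register the IOMMU and group notifiers, grab the
 * VFIO group and a module reference, then initialize the KVM guest state
 * and activate the vGPU.
 */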
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned long events;
	int ret;
	struct vfio_group *vfio_group;

	vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vdev->iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vdev->group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
	if (IS_ERR_OR_NULL(vfio_group)) {
		ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
		gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
		goto undo_register;
	}
	vdev->vfio_group = vfio_group;

	/*
	 * Take a module reference, as the mdev core doesn't take one for
	 * the vendor driver.
	 */
	if (!try_module_get(THIS_MODULE)) {
		ret = -ENODEV;
		goto undo_group;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vdev->released, 0);
	return ret;

undo_group:
	vfio_group_put_external_user(vdev->vfio_group);
	vdev->vfio_group = NULL;

undo_register:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vdev->group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vdev->iommu_notifier);
out:
	return ret;
}

static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct eventfd_ctx *trigger;

	trigger = vdev->msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vdev->msi_trigger = NULL;
	}
}

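/*
 * Common release path shared by the mdev "release" callback and the
 * deferred release work; the cmpxchg on vdev->released guarantees the
 * teardown runs only once.
 */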
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vdev->released, 0, 1))
		return;

	intel_gvt_ops->vgpu_release(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
					&vdev->iommu_notifier);
	drm_WARN(&i915->drm, ret,
		 "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
					&vdev->group_notifier);
	drm_WARN(&i915->drm, ret,
		 "vfio_unregister_notifier for group failed: %d\n", ret);

	/* dereference module reference taken at open */
	module_put(THIS_MODULE);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	intel_vgpu_release_msi_eventfd_ctx(vgpu);
	vfio_group_put_external_user(vdev->vfio_group);

	vdev->kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
					       release_work);

	__intel_vgpu_release(vdev->vgpu);
}

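/*
 * Decode the guest-visible base address of a memory BAR from virtual
 * config space, including the high dword for 64-bit BARs.
 */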
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
			     void *buf, unsigned int count, bool is_write)
{
	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

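/*
 * Service aperture (BAR2) accesses by copying through a write-combining
 * mapping of the vGPU's slice of the GGTT aperture.
 */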
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
		void *buf, unsigned long count, bool is_write)
{
	void __iomem *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}

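/*
 * Central read/write dispatcher: route an access to config space
 * emulation, BAR0 MMIO emulation, the BAR2 aperture or a device-specific
 * region, based on the region index encoded in the file offset.
 */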
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}

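/*
 * Check whether an access falls on a GGTT entry in MMIO space; the
 * read/write handlers use this to gate the 8-byte access path.
 */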
static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;
	int offset;

	/* Only allow MMIO GGTT entry access */
	if (index != PCI_BASE_ADDRESS_0)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return (offset >= gvt->device_info.gtt_start_offset &&
		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
			true : false;
}

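/*
 * Read handler for the mdev fd: split the user buffer into naturally
 * aligned 1/2/4/8-byte accesses, the 8-byte case being reserved for
 * GGTT entries.
 */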
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support 8-byte GGTT entry reads */
1080*4882a593Smuzhiyun 		if (count >= 8 && !(*ppos % 8) &&
1081*4882a593Smuzhiyun 			gtt_entry(mdev, ppos)) {
1082*4882a593Smuzhiyun 			u64 val;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1085*4882a593Smuzhiyun 					ppos, false);
1086*4882a593Smuzhiyun 			if (ret <= 0)
1087*4882a593Smuzhiyun 				goto read_err;
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 			if (copy_to_user(buf, &val, sizeof(val)))
1090*4882a593Smuzhiyun 				goto read_err;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 			filled = 8;
1093*4882a593Smuzhiyun 		} else if (count >= 4 && !(*ppos % 4)) {
1094*4882a593Smuzhiyun 			u32 val;
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1097*4882a593Smuzhiyun 					ppos, false);
1098*4882a593Smuzhiyun 			if (ret <= 0)
1099*4882a593Smuzhiyun 				goto read_err;
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 			if (copy_to_user(buf, &val, sizeof(val)))
1102*4882a593Smuzhiyun 				goto read_err;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 			filled = 4;
1105*4882a593Smuzhiyun 		} else if (count >= 2 && !(*ppos % 2)) {
1106*4882a593Smuzhiyun 			u16 val;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1109*4882a593Smuzhiyun 					ppos, false);
1110*4882a593Smuzhiyun 			if (ret <= 0)
1111*4882a593Smuzhiyun 				goto read_err;
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 			if (copy_to_user(buf, &val, sizeof(val)))
1114*4882a593Smuzhiyun 				goto read_err;
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 			filled = 2;
1117*4882a593Smuzhiyun 		} else {
1118*4882a593Smuzhiyun 			u8 val;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
1121*4882a593Smuzhiyun 					false);
1122*4882a593Smuzhiyun 			if (ret <= 0)
1123*4882a593Smuzhiyun 				goto read_err;
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 			if (copy_to_user(buf, &val, sizeof(val)))
1126*4882a593Smuzhiyun 				goto read_err;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 			filled = 1;
1129*4882a593Smuzhiyun 		}
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 		count -= filled;
1132*4882a593Smuzhiyun 		done += filled;
1133*4882a593Smuzhiyun 		*ppos += filled;
1134*4882a593Smuzhiyun 		buf += filled;
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	return done;
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun read_err:
1140*4882a593Smuzhiyun 	return -EFAULT;
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun static ssize_t intel_vgpu_write(struct mdev_device *mdev,
1144*4882a593Smuzhiyun 				const char __user *buf,
1145*4882a593Smuzhiyun 				size_t count, loff_t *ppos)
1146*4882a593Smuzhiyun {
1147*4882a593Smuzhiyun 	unsigned int done = 0;
1148*4882a593Smuzhiyun 	int ret;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	while (count) {
1151*4882a593Smuzhiyun 		size_t filled;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 		/* Only support 8-byte, 8-byte-aligned writes of GGTT entries */
1154*4882a593Smuzhiyun 		if (count >= 8 && !(*ppos % 8) &&
1155*4882a593Smuzhiyun 			gtt_entry(mdev, ppos)) {
1156*4882a593Smuzhiyun 			u64 val;
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 			if (copy_from_user(&val, buf, sizeof(val)))
1159*4882a593Smuzhiyun 				goto write_err;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1162*4882a593Smuzhiyun 					ppos, true);
1163*4882a593Smuzhiyun 			if (ret <= 0)
1164*4882a593Smuzhiyun 				goto write_err;
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 			filled = 8;
1167*4882a593Smuzhiyun 		} else if (count >= 4 && !(*ppos % 4)) {
1168*4882a593Smuzhiyun 			u32 val;
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 			if (copy_from_user(&val, buf, sizeof(val)))
1171*4882a593Smuzhiyun 				goto write_err;
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1174*4882a593Smuzhiyun 					ppos, true);
1175*4882a593Smuzhiyun 			if (ret <= 0)
1176*4882a593Smuzhiyun 				goto write_err;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 			filled = 4;
1179*4882a593Smuzhiyun 		} else if (count >= 2 && !(*ppos % 2)) {
1180*4882a593Smuzhiyun 			u16 val;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 			if (copy_from_user(&val, buf, sizeof(val)))
1183*4882a593Smuzhiyun 				goto write_err;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, (char *)&val,
1186*4882a593Smuzhiyun 					sizeof(val), ppos, true);
1187*4882a593Smuzhiyun 			if (ret <= 0)
1188*4882a593Smuzhiyun 				goto write_err;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 			filled = 2;
1191*4882a593Smuzhiyun 		} else {
1192*4882a593Smuzhiyun 			u8 val;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 			if (copy_from_user(&val, buf, sizeof(val)))
1195*4882a593Smuzhiyun 				goto write_err;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
1198*4882a593Smuzhiyun 					ppos, true);
1199*4882a593Smuzhiyun 			if (ret <= 0)
1200*4882a593Smuzhiyun 				goto write_err;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 			filled = 1;
1203*4882a593Smuzhiyun 		}
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 		count -= filled;
1206*4882a593Smuzhiyun 		done += filled;
1207*4882a593Smuzhiyun 		*ppos += filled;
1208*4882a593Smuzhiyun 		buf += filled;
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	return done;
1212*4882a593Smuzhiyun write_err:
1213*4882a593Smuzhiyun 	return -EFAULT;
1214*4882a593Smuzhiyun }
1215*4882a593Smuzhiyun 
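/*
 * Illustrative userspace sketch (not part of this driver): an 8-byte,
 * 8-byte-aligned pwrite into the GGTT range of BAR0 takes the u64 branch
 * of intel_vgpu_write() above, so a GGTT entry is updated in a single
 * access instead of two 4-byte writes. gtt_base is an assumed
 * caller-supplied offset of the GGTT within BAR0.
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <linux/vfio.h>
 *
 *	static int write_ggtt_entry(int device_fd, uint64_t gtt_base,
 *				    uint64_t index, uint64_t pte)
 *	{
 *		off_t off = ((off_t)VFIO_PCI_BAR0_REGION_INDEX << 40) +
 *			    gtt_base + index * sizeof(pte);
 *
 *		return pwrite(device_fd, &pte, sizeof(pte), off)
 *			== sizeof(pte) ? 0 : -1;
 *	}
 */
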
1216*4882a593Smuzhiyun static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1217*4882a593Smuzhiyun {
1218*4882a593Smuzhiyun 	unsigned int index;
1219*4882a593Smuzhiyun 	u64 virtaddr;
1220*4882a593Smuzhiyun 	unsigned long req_size, pgoff, req_start;
1221*4882a593Smuzhiyun 	pgprot_t pg_prot;
1222*4882a593Smuzhiyun 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1225*4882a593Smuzhiyun 	if (index >= VFIO_PCI_ROM_REGION_INDEX)
1226*4882a593Smuzhiyun 		return -EINVAL;
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	if (vma->vm_end < vma->vm_start)
1229*4882a593Smuzhiyun 		return -EINVAL;
1230*4882a593Smuzhiyun 	if ((vma->vm_flags & VM_SHARED) == 0)
1231*4882a593Smuzhiyun 		return -EINVAL;
1232*4882a593Smuzhiyun 	if (index != VFIO_PCI_BAR2_REGION_INDEX)
1233*4882a593Smuzhiyun 		return -EINVAL;
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	pg_prot = vma->vm_page_prot;
1236*4882a593Smuzhiyun 	virtaddr = vma->vm_start;
1237*4882a593Smuzhiyun 	req_size = vma->vm_end - vma->vm_start;
1238*4882a593Smuzhiyun 	pgoff = vma->vm_pgoff &
1239*4882a593Smuzhiyun 		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1240*4882a593Smuzhiyun 	req_start = pgoff << PAGE_SHIFT;
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun 	if (!intel_vgpu_in_aperture(vgpu, req_start))
1243*4882a593Smuzhiyun 		return -EINVAL;
1244*4882a593Smuzhiyun 	if (req_start + req_size >
1245*4882a593Smuzhiyun 	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1246*4882a593Smuzhiyun 		return -EINVAL;
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun 
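/*
 * Illustrative userspace sketch (not part of this driver): mapping the
 * vGPU aperture. intel_vgpu_mmap() above only accepts shared mappings of
 * BAR2 that stay within this vGPU's aperture slice; area_off and area_sz
 * are taken from the sparse-mmap capability reported by
 * VFIO_DEVICE_GET_REGION_INFO (see the ioctl handler below).
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <linux/vfio.h>
 *
 *	static void *map_aperture(int device_fd, uint64_t area_off,
 *				  uint64_t area_sz)
 *	{
 *		off_t off = ((off_t)VFIO_PCI_BAR2_REGION_INDEX << 40) +
 *			    area_off;
 *
 *		return mmap(NULL, area_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, device_fd, off);
 *	}
 */
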
1253*4882a593Smuzhiyun static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun 	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
1256*4882a593Smuzhiyun 		return 1;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	return 0;
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1262*4882a593Smuzhiyun 			unsigned int index, unsigned int start,
1263*4882a593Smuzhiyun 			unsigned int count, u32 flags,
1264*4882a593Smuzhiyun 			void *data)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun 	return 0;
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1270*4882a593Smuzhiyun 			unsigned int index, unsigned int start,
1271*4882a593Smuzhiyun 			unsigned int count, u32 flags, void *data)
1272*4882a593Smuzhiyun {
1273*4882a593Smuzhiyun 	return 0;
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1277*4882a593Smuzhiyun 		unsigned int index, unsigned int start, unsigned int count,
1278*4882a593Smuzhiyun 		u32 flags, void *data)
1279*4882a593Smuzhiyun {
1280*4882a593Smuzhiyun 	return 0;
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1284*4882a593Smuzhiyun 		unsigned int index, unsigned int start, unsigned int count,
1285*4882a593Smuzhiyun 		u32 flags, void *data)
1286*4882a593Smuzhiyun {
1287*4882a593Smuzhiyun 	struct eventfd_ctx *trigger;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
1290*4882a593Smuzhiyun 		int fd = *(int *)data;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 		trigger = eventfd_ctx_fdget(fd);
1293*4882a593Smuzhiyun 		if (IS_ERR(trigger)) {
1294*4882a593Smuzhiyun 			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
1295*4882a593Smuzhiyun 			return PTR_ERR(trigger);
1296*4882a593Smuzhiyun 		}
1297*4882a593Smuzhiyun 		kvmgt_vdev(vgpu)->msi_trigger = trigger;
1298*4882a593Smuzhiyun 	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
1299*4882a593Smuzhiyun 		intel_vgpu_release_msi_eventfd_ctx(vgpu);
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	return 0;
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun 
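/*
 * Illustrative userspace sketch (not part of this driver): wiring an
 * eventfd up as the MSI trigger. This VFIO_DEVICE_SET_IRQS call reaches
 * intel_vgpu_set_msi_trigger() above with VFIO_IRQ_SET_DATA_EVENTFD set;
 * kvmgt_inject_msi() below then signals the eventfd whenever the vGPU
 * raises MSI.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	static int enable_msi(int device_fd, int *efd_out)
 *	{
 *		struct vfio_irq_set *set;
 *		int efd = eventfd(0, 0);
 *		int ret = -1;
 *
 *		if (efd < 0)
 *			return -1;
 *		set = calloc(1, sizeof(*set) + sizeof(int));
 *		if (!set)
 *			return -1;
 *		set->argsz = sizeof(*set) + sizeof(int);
 *		set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			     VFIO_IRQ_SET_ACTION_TRIGGER;
 *		set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *		set->start = 0;
 *		set->count = 1;
 *		memcpy(set->data, &efd, sizeof(efd));
 *
 *		if (!ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set)) {
 *			*efd_out = efd;
 *			ret = 0;
 *		}
 *		free(set);
 *		return ret;
 *	}
 */
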
1304*4882a593Smuzhiyun static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1305*4882a593Smuzhiyun 		unsigned int index, unsigned int start, unsigned int count,
1306*4882a593Smuzhiyun 		void *data)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun 	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1309*4882a593Smuzhiyun 			unsigned int start, unsigned int count, u32 flags,
1310*4882a593Smuzhiyun 			void *data) = NULL;
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	switch (index) {
1313*4882a593Smuzhiyun 	case VFIO_PCI_INTX_IRQ_INDEX:
1314*4882a593Smuzhiyun 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1315*4882a593Smuzhiyun 		case VFIO_IRQ_SET_ACTION_MASK:
1316*4882a593Smuzhiyun 			func = intel_vgpu_set_intx_mask;
1317*4882a593Smuzhiyun 			break;
1318*4882a593Smuzhiyun 		case VFIO_IRQ_SET_ACTION_UNMASK:
1319*4882a593Smuzhiyun 			func = intel_vgpu_set_intx_unmask;
1320*4882a593Smuzhiyun 			break;
1321*4882a593Smuzhiyun 		case VFIO_IRQ_SET_ACTION_TRIGGER:
1322*4882a593Smuzhiyun 			func = intel_vgpu_set_intx_trigger;
1323*4882a593Smuzhiyun 			break;
1324*4882a593Smuzhiyun 		}
1325*4882a593Smuzhiyun 		break;
1326*4882a593Smuzhiyun 	case VFIO_PCI_MSI_IRQ_INDEX:
1327*4882a593Smuzhiyun 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1328*4882a593Smuzhiyun 		case VFIO_IRQ_SET_ACTION_MASK:
1329*4882a593Smuzhiyun 		case VFIO_IRQ_SET_ACTION_UNMASK:
1330*4882a593Smuzhiyun 			/* XXX Need masking support exported */
1331*4882a593Smuzhiyun 			break;
1332*4882a593Smuzhiyun 		case VFIO_IRQ_SET_ACTION_TRIGGER:
1333*4882a593Smuzhiyun 			func = intel_vgpu_set_msi_trigger;
1334*4882a593Smuzhiyun 			break;
1335*4882a593Smuzhiyun 		}
1336*4882a593Smuzhiyun 		break;
1337*4882a593Smuzhiyun 	}
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	if (!func)
1340*4882a593Smuzhiyun 		return -ENOTTY;
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	return func(vgpu, index, start, count, flags, data);
1343*4882a593Smuzhiyun }
1344*4882a593Smuzhiyun 
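/*
 * Illustrative userspace sketch (not part of this driver): tearing the
 * trigger back down. A VFIO_IRQ_SET_DATA_NONE trigger with count == 0 is
 * the branch of intel_vgpu_set_msi_trigger() that releases the eventfd
 * context; note that intel_vgpu_ioctl() below skips the payload
 * validation entirely for DATA_NONE requests.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	static int disable_msi(int device_fd)
 *	{
 *		struct vfio_irq_set set;
 *
 *		memset(&set, 0, sizeof(set));
 *		set.argsz = sizeof(set);
 *		set.flags = VFIO_IRQ_SET_DATA_NONE |
 *			    VFIO_IRQ_SET_ACTION_TRIGGER;
 *		set.index = VFIO_PCI_MSI_IRQ_INDEX;
 *
 *		return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
 *	}
 */
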
1345*4882a593Smuzhiyun static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1346*4882a593Smuzhiyun 			     unsigned long arg)
1347*4882a593Smuzhiyun {
1348*4882a593Smuzhiyun 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1349*4882a593Smuzhiyun 	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1350*4882a593Smuzhiyun 	unsigned long minsz;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	if (cmd == VFIO_DEVICE_GET_INFO) {
1355*4882a593Smuzhiyun 		struct vfio_device_info info;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 		minsz = offsetofend(struct vfio_device_info, num_irqs);
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 		if (copy_from_user(&info, (void __user *)arg, minsz))
1360*4882a593Smuzhiyun 			return -EFAULT;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 		if (info.argsz < minsz)
1363*4882a593Smuzhiyun 			return -EINVAL;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 		info.flags = VFIO_DEVICE_FLAGS_PCI;
1366*4882a593Smuzhiyun 		info.flags |= VFIO_DEVICE_FLAGS_RESET;
1367*4882a593Smuzhiyun 		info.num_regions = VFIO_PCI_NUM_REGIONS +
1368*4882a593Smuzhiyun 				vdev->num_regions;
1369*4882a593Smuzhiyun 		info.num_irqs = VFIO_PCI_NUM_IRQS;
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 		return copy_to_user((void __user *)arg, &info, minsz) ?
1372*4882a593Smuzhiyun 			-EFAULT : 0;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1375*4882a593Smuzhiyun 		struct vfio_region_info info;
1376*4882a593Smuzhiyun 		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
1377*4882a593Smuzhiyun 		unsigned int i;
1378*4882a593Smuzhiyun 		int ret;
1379*4882a593Smuzhiyun 		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
1380*4882a593Smuzhiyun 		int nr_areas = 1;
1381*4882a593Smuzhiyun 		int cap_type_id;
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 		minsz = offsetofend(struct vfio_region_info, offset);
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 		if (copy_from_user(&info, (void __user *)arg, minsz))
1386*4882a593Smuzhiyun 			return -EFAULT;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 		if (info.argsz < minsz)
1389*4882a593Smuzhiyun 			return -EINVAL;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 		switch (info.index) {
1392*4882a593Smuzhiyun 		case VFIO_PCI_CONFIG_REGION_INDEX:
1393*4882a593Smuzhiyun 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1394*4882a593Smuzhiyun 			info.size = vgpu->gvt->device_info.cfg_space_size;
1395*4882a593Smuzhiyun 			info.flags = VFIO_REGION_INFO_FLAG_READ |
1396*4882a593Smuzhiyun 				     VFIO_REGION_INFO_FLAG_WRITE;
1397*4882a593Smuzhiyun 			break;
1398*4882a593Smuzhiyun 		case VFIO_PCI_BAR0_REGION_INDEX:
1399*4882a593Smuzhiyun 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1400*4882a593Smuzhiyun 			info.size = vgpu->cfg_space.bar[info.index].size;
1401*4882a593Smuzhiyun 			if (!info.size) {
1402*4882a593Smuzhiyun 				info.flags = 0;
1403*4882a593Smuzhiyun 				break;
1404*4882a593Smuzhiyun 			}
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 			info.flags = VFIO_REGION_INFO_FLAG_READ |
1407*4882a593Smuzhiyun 				     VFIO_REGION_INFO_FLAG_WRITE;
1408*4882a593Smuzhiyun 			break;
1409*4882a593Smuzhiyun 		case VFIO_PCI_BAR1_REGION_INDEX:
1410*4882a593Smuzhiyun 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1411*4882a593Smuzhiyun 			info.size = 0;
1412*4882a593Smuzhiyun 			info.flags = 0;
1413*4882a593Smuzhiyun 			break;
1414*4882a593Smuzhiyun 		case VFIO_PCI_BAR2_REGION_INDEX:
1415*4882a593Smuzhiyun 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1416*4882a593Smuzhiyun 			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
1417*4882a593Smuzhiyun 					VFIO_REGION_INFO_FLAG_MMAP |
1418*4882a593Smuzhiyun 					VFIO_REGION_INFO_FLAG_READ |
1419*4882a593Smuzhiyun 					VFIO_REGION_INFO_FLAG_WRITE;
1420*4882a593Smuzhiyun 			info.size = gvt_aperture_sz(vgpu->gvt);
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 			sparse = kzalloc(struct_size(sparse, areas, nr_areas),
1423*4882a593Smuzhiyun 					 GFP_KERNEL);
1424*4882a593Smuzhiyun 			if (!sparse)
1425*4882a593Smuzhiyun 				return -ENOMEM;
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1428*4882a593Smuzhiyun 			sparse->header.version = 1;
1429*4882a593Smuzhiyun 			sparse->nr_areas = nr_areas;
1430*4882a593Smuzhiyun 			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1431*4882a593Smuzhiyun 			sparse->areas[0].offset =
1432*4882a593Smuzhiyun 					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1433*4882a593Smuzhiyun 			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1434*4882a593Smuzhiyun 			break;
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1437*4882a593Smuzhiyun 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1438*4882a593Smuzhiyun 			info.size = 0;
1439*4882a593Smuzhiyun 			info.flags = 0;
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 			gvt_dbg_core("get region info bar:%d\n", info.index);
1442*4882a593Smuzhiyun 			break;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 		case VFIO_PCI_ROM_REGION_INDEX:
1445*4882a593Smuzhiyun 		case VFIO_PCI_VGA_REGION_INDEX:
1446*4882a593Smuzhiyun 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1447*4882a593Smuzhiyun 			info.size = 0;
1448*4882a593Smuzhiyun 			info.flags = 0;
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 			gvt_dbg_core("get region info index:%d\n", info.index);
1451*4882a593Smuzhiyun 			break;
1452*4882a593Smuzhiyun 		default:
1453*4882a593Smuzhiyun 			{
1454*4882a593Smuzhiyun 				struct vfio_region_info_cap_type cap_type = {
1455*4882a593Smuzhiyun 					.header.id = VFIO_REGION_INFO_CAP_TYPE,
1456*4882a593Smuzhiyun 					.header.version = 1 };
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 				if (info.index >= VFIO_PCI_NUM_REGIONS +
1459*4882a593Smuzhiyun 						vdev->num_regions)
1460*4882a593Smuzhiyun 					return -EINVAL;
1461*4882a593Smuzhiyun 				info.index =
1462*4882a593Smuzhiyun 					array_index_nospec(info.index,
1463*4882a593Smuzhiyun 							VFIO_PCI_NUM_REGIONS +
1464*4882a593Smuzhiyun 							vdev->num_regions);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 				i = info.index - VFIO_PCI_NUM_REGIONS;
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 				info.offset =
1469*4882a593Smuzhiyun 					VFIO_PCI_INDEX_TO_OFFSET(info.index);
1470*4882a593Smuzhiyun 				info.size = vdev->region[i].size;
1471*4882a593Smuzhiyun 				info.flags = vdev->region[i].flags;
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 				cap_type.type = vdev->region[i].type;
1474*4882a593Smuzhiyun 				cap_type.subtype = vdev->region[i].subtype;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 				ret = vfio_info_add_capability(&caps,
1477*4882a593Smuzhiyun 							&cap_type.header,
1478*4882a593Smuzhiyun 							sizeof(cap_type));
1479*4882a593Smuzhiyun 				if (ret)
1480*4882a593Smuzhiyun 					return ret;
1481*4882a593Smuzhiyun 			}
1482*4882a593Smuzhiyun 		}
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1485*4882a593Smuzhiyun 			switch (cap_type_id) {
1486*4882a593Smuzhiyun 			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
1487*4882a593Smuzhiyun 				ret = vfio_info_add_capability(&caps,
1488*4882a593Smuzhiyun 					&sparse->header,
1489*4882a593Smuzhiyun 					struct_size(sparse, areas,
1490*4882a593Smuzhiyun 						    sparse->nr_areas));
1491*4882a593Smuzhiyun 				if (ret) {
1492*4882a593Smuzhiyun 					kfree(sparse);
1493*4882a593Smuzhiyun 					return ret;
1494*4882a593Smuzhiyun 				}
1495*4882a593Smuzhiyun 				break;
1496*4882a593Smuzhiyun 			default:
1497*4882a593Smuzhiyun 				kfree(sparse);
1498*4882a593Smuzhiyun 				return -EINVAL;
1499*4882a593Smuzhiyun 			}
1500*4882a593Smuzhiyun 		}
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 		if (caps.size) {
1503*4882a593Smuzhiyun 			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1504*4882a593Smuzhiyun 			if (info.argsz < sizeof(info) + caps.size) {
1505*4882a593Smuzhiyun 				info.argsz = sizeof(info) + caps.size;
1506*4882a593Smuzhiyun 				info.cap_offset = 0;
1507*4882a593Smuzhiyun 			} else {
1508*4882a593Smuzhiyun 				vfio_info_cap_shift(&caps, sizeof(info));
1509*4882a593Smuzhiyun 				if (copy_to_user((void __user *)arg +
1510*4882a593Smuzhiyun 						  sizeof(info), caps.buf,
1511*4882a593Smuzhiyun 						  caps.size)) {
1512*4882a593Smuzhiyun 					kfree(caps.buf);
1513*4882a593Smuzhiyun 					kfree(sparse);
1514*4882a593Smuzhiyun 					return -EFAULT;
1515*4882a593Smuzhiyun 				}
1516*4882a593Smuzhiyun 				info.cap_offset = sizeof(info);
1517*4882a593Smuzhiyun 			}
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 			kfree(caps.buf);
1520*4882a593Smuzhiyun 		}
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 		kfree(sparse);
1523*4882a593Smuzhiyun 		return copy_to_user((void __user *)arg, &info, minsz) ?
1524*4882a593Smuzhiyun 			-EFAULT : 0;
1525*4882a593Smuzhiyun 	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1526*4882a593Smuzhiyun 		struct vfio_irq_info info;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 		minsz = offsetofend(struct vfio_irq_info, count);
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 		if (copy_from_user(&info, (void __user *)arg, minsz))
1531*4882a593Smuzhiyun 			return -EFAULT;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1534*4882a593Smuzhiyun 			return -EINVAL;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 		switch (info.index) {
1537*4882a593Smuzhiyun 		case VFIO_PCI_INTX_IRQ_INDEX:
1538*4882a593Smuzhiyun 		case VFIO_PCI_MSI_IRQ_INDEX:
1539*4882a593Smuzhiyun 			break;
1540*4882a593Smuzhiyun 		default:
1541*4882a593Smuzhiyun 			return -EINVAL;
1542*4882a593Smuzhiyun 		}
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 		info.flags = VFIO_IRQ_INFO_EVENTFD;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 		info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1549*4882a593Smuzhiyun 			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1550*4882a593Smuzhiyun 				       VFIO_IRQ_INFO_AUTOMASKED);
1551*4882a593Smuzhiyun 		else
1552*4882a593Smuzhiyun 			info.flags |= VFIO_IRQ_INFO_NORESIZE;
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 		return copy_to_user((void __user *)arg, &info, minsz) ?
1555*4882a593Smuzhiyun 			-EFAULT : 0;
1556*4882a593Smuzhiyun 	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
1557*4882a593Smuzhiyun 		struct vfio_irq_set hdr;
1558*4882a593Smuzhiyun 		u8 *data = NULL;
1559*4882a593Smuzhiyun 		int ret = 0;
1560*4882a593Smuzhiyun 		size_t data_size = 0;
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 		minsz = offsetofend(struct vfio_irq_set, count);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 		if (copy_from_user(&hdr, (void __user *)arg, minsz))
1565*4882a593Smuzhiyun 			return -EFAULT;
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun 		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1568*4882a593Smuzhiyun 			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1571*4882a593Smuzhiyun 						VFIO_PCI_NUM_IRQS, &data_size);
1572*4882a593Smuzhiyun 			if (ret) {
1573*4882a593Smuzhiyun 				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1574*4882a593Smuzhiyun 				return -EINVAL;
1575*4882a593Smuzhiyun 			}
1576*4882a593Smuzhiyun 			if (data_size) {
1577*4882a593Smuzhiyun 				data = memdup_user((void __user *)(arg + minsz),
1578*4882a593Smuzhiyun 						   data_size);
1579*4882a593Smuzhiyun 				if (IS_ERR(data))
1580*4882a593Smuzhiyun 					return PTR_ERR(data);
1581*4882a593Smuzhiyun 			}
1582*4882a593Smuzhiyun 		}
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1585*4882a593Smuzhiyun 					hdr.start, hdr.count, data);
1586*4882a593Smuzhiyun 		kfree(data);
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 		return ret;
1589*4882a593Smuzhiyun 	} else if (cmd == VFIO_DEVICE_RESET) {
1590*4882a593Smuzhiyun 		intel_gvt_ops->vgpu_reset(vgpu);
1591*4882a593Smuzhiyun 		return 0;
1592*4882a593Smuzhiyun 	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
1593*4882a593Smuzhiyun 		struct vfio_device_gfx_plane_info dmabuf;
1594*4882a593Smuzhiyun 		int ret = 0;
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 		minsz = offsetofend(struct vfio_device_gfx_plane_info,
1597*4882a593Smuzhiyun 				    dmabuf_id);
1598*4882a593Smuzhiyun 		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
1599*4882a593Smuzhiyun 			return -EFAULT;
1600*4882a593Smuzhiyun 		if (dmabuf.argsz < minsz)
1601*4882a593Smuzhiyun 			return -EINVAL;
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
1604*4882a593Smuzhiyun 		if (ret != 0)
1605*4882a593Smuzhiyun 			return ret;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
1608*4882a593Smuzhiyun 								-EFAULT : 0;
1609*4882a593Smuzhiyun 	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
1610*4882a593Smuzhiyun 		__u32 dmabuf_id;
1611*4882a593Smuzhiyun 		__s32 dmabuf_fd;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 		if (get_user(dmabuf_id, (__u32 __user *)arg))
1614*4882a593Smuzhiyun 			return -EFAULT;
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
1617*4882a593Smuzhiyun 		return dmabuf_fd;
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 	}
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	return -ENOTTY;
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun 
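/*
 * Illustrative userspace sketch (not part of this driver): the two-pass
 * VFIO_DEVICE_GET_REGION_INFO protocol implemented above for BAR2. When
 * the capability chain does not fit, the handler reports the required
 * argsz and clears cap_offset; userspace grows the buffer and retries to
 * obtain the sparse-mmap capability appended after the fixed header.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	static struct vfio_region_info *get_bar2_info(int device_fd)
 *	{
 *		struct vfio_region_info *info = calloc(1, sizeof(*info));
 *
 *		if (!info)
 *			return NULL;
 *		info->argsz = sizeof(*info);
 *		info->index = VFIO_PCI_BAR2_REGION_INDEX;
 *		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
 *			goto err;
 *
 *		if (info->argsz > sizeof(*info)) {
 *			__u32 argsz = info->argsz;
 *			struct vfio_region_info *big = realloc(info, argsz);
 *
 *			if (!big)
 *				goto err;
 *			info = big;
 *			memset(info, 0, argsz);
 *			info->argsz = argsz;
 *			info->index = VFIO_PCI_BAR2_REGION_INDEX;
 *			if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO,
 *				  info))
 *				goto err;
 *		}
 *		return info;
 *	err:
 *		free(info);
 *		return NULL;
 *	}
 */
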
1624*4882a593Smuzhiyun static ssize_t
1625*4882a593Smuzhiyun vgpu_id_show(struct device *dev, struct device_attribute *attr,
1626*4882a593Smuzhiyun 	     char *buf)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun 	struct mdev_device *mdev = mdev_from_dev(dev);
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	if (mdev) {
1631*4882a593Smuzhiyun 		struct intel_vgpu *vgpu = (struct intel_vgpu *)
1632*4882a593Smuzhiyun 			mdev_get_drvdata(mdev);
1633*4882a593Smuzhiyun 		return sprintf(buf, "%d\n", vgpu->id);
1634*4882a593Smuzhiyun 	}
1635*4882a593Smuzhiyun 	return sprintf(buf, "\n");
1636*4882a593Smuzhiyun }
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun static DEVICE_ATTR_RO(vgpu_id);
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun static struct attribute *intel_vgpu_attrs[] = {
1641*4882a593Smuzhiyun 	&dev_attr_vgpu_id.attr,
1642*4882a593Smuzhiyun 	NULL
1643*4882a593Smuzhiyun };
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun static const struct attribute_group intel_vgpu_group = {
1646*4882a593Smuzhiyun 	.name = "intel_vgpu",
1647*4882a593Smuzhiyun 	.attrs = intel_vgpu_attrs,
1648*4882a593Smuzhiyun };
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun static const struct attribute_group *intel_vgpu_groups[] = {
1651*4882a593Smuzhiyun 	&intel_vgpu_group,
1652*4882a593Smuzhiyun 	NULL,
1653*4882a593Smuzhiyun };
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun static struct mdev_parent_ops intel_vgpu_ops = {
1656*4882a593Smuzhiyun 	.mdev_attr_groups       = intel_vgpu_groups,
1657*4882a593Smuzhiyun 	.create			= intel_vgpu_create,
1658*4882a593Smuzhiyun 	.remove			= intel_vgpu_remove,
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	.open			= intel_vgpu_open,
1661*4882a593Smuzhiyun 	.release		= intel_vgpu_release,
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	.read			= intel_vgpu_read,
1664*4882a593Smuzhiyun 	.write			= intel_vgpu_write,
1665*4882a593Smuzhiyun 	.mmap			= intel_vgpu_mmap,
1666*4882a593Smuzhiyun 	.ioctl			= intel_vgpu_ioctl,
1667*4882a593Smuzhiyun };
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun 	struct attribute_group **kvm_vgpu_type_groups;
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	intel_gvt_ops = ops;
1674*4882a593Smuzhiyun 	if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
1675*4882a593Smuzhiyun 		return -EFAULT;
1676*4882a593Smuzhiyun 	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	return mdev_register_device(dev, &intel_vgpu_ops);
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun static void kvmgt_host_exit(struct device *dev)
1682*4882a593Smuzhiyun {
1683*4882a593Smuzhiyun 	mdev_unregister_device(dev);
1684*4882a593Smuzhiyun }
1685*4882a593Smuzhiyun 
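/*
 * Illustrative userspace sketch (not part of this driver): once
 * kvmgt_host_init() has registered the parent device, a vGPU instance is
 * created by writing a UUID into the "create" node of one of the
 * advertised types. The type name ("i915-GVTg_V5_4") and parent PCI
 * address below are examples, not fixed values.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int create_vgpu(const char *uuid)
 *	{
 *		const char *path = "/sys/bus/pci/devices/0000:00:02.0/"
 *			"mdev_supported_types/i915-GVTg_V5_4/create";
 *		ssize_t len = (ssize_t)strlen(uuid);
 *		int ret = -1;
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, uuid, len) == len)
 *			ret = 0;
 *		close(fd);
 *		return ret;
 *	}
 */
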
1686*4882a593Smuzhiyun static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
1687*4882a593Smuzhiyun {
1688*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
1689*4882a593Smuzhiyun 	struct kvm *kvm;
1690*4882a593Smuzhiyun 	struct kvm_memory_slot *slot;
1691*4882a593Smuzhiyun 	int idx;
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	if (!handle_valid(handle))
1694*4882a593Smuzhiyun 		return -ESRCH;
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 	info = (struct kvmgt_guest_info *)handle;
1697*4882a593Smuzhiyun 	kvm = info->kvm;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	idx = srcu_read_lock(&kvm->srcu);
1700*4882a593Smuzhiyun 	slot = gfn_to_memslot(kvm, gfn);
1701*4882a593Smuzhiyun 	if (!slot) {
1702*4882a593Smuzhiyun 		srcu_read_unlock(&kvm->srcu, idx);
1703*4882a593Smuzhiyun 		return -EINVAL;
1704*4882a593Smuzhiyun 	}
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	if (kvmgt_gfn_is_write_protected(info, gfn))
1709*4882a593Smuzhiyun 		goto out;
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1712*4882a593Smuzhiyun 	kvmgt_protect_table_add(info, gfn);
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun out:
1715*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
1716*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, idx);
1717*4882a593Smuzhiyun 	return 0;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
1721*4882a593Smuzhiyun {
1722*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
1723*4882a593Smuzhiyun 	struct kvm *kvm;
1724*4882a593Smuzhiyun 	struct kvm_memory_slot *slot;
1725*4882a593Smuzhiyun 	int idx;
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun 	if (!handle_valid(handle))
1728*4882a593Smuzhiyun 		return 0;
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 	info = (struct kvmgt_guest_info *)handle;
1731*4882a593Smuzhiyun 	kvm = info->kvm;
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	idx = srcu_read_lock(&kvm->srcu);
1734*4882a593Smuzhiyun 	slot = gfn_to_memslot(kvm, gfn);
1735*4882a593Smuzhiyun 	if (!slot) {
1736*4882a593Smuzhiyun 		srcu_read_unlock(&kvm->srcu, idx);
1737*4882a593Smuzhiyun 		return -EINVAL;
1738*4882a593Smuzhiyun 	}
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	if (!kvmgt_gfn_is_write_protected(info, gfn))
1743*4882a593Smuzhiyun 		goto out;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1746*4882a593Smuzhiyun 	kvmgt_protect_table_del(info, gfn);
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun out:
1749*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
1750*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, idx);
1751*4882a593Smuzhiyun 	return 0;
1752*4882a593Smuzhiyun }
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1755*4882a593Smuzhiyun 		const u8 *val, int len,
1756*4882a593Smuzhiyun 		struct kvm_page_track_notifier_node *node)
1757*4882a593Smuzhiyun {
1758*4882a593Smuzhiyun 	struct kvmgt_guest_info *info = container_of(node,
1759*4882a593Smuzhiyun 					struct kvmgt_guest_info, track_node);
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1762*4882a593Smuzhiyun 		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1763*4882a593Smuzhiyun 						     (void *)val, len);
1764*4882a593Smuzhiyun }
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1767*4882a593Smuzhiyun 		struct kvm_memory_slot *slot,
1768*4882a593Smuzhiyun 		struct kvm_page_track_notifier_node *node)
1769*4882a593Smuzhiyun {
1770*4882a593Smuzhiyun 	int i;
1771*4882a593Smuzhiyun 	gfn_t gfn;
1772*4882a593Smuzhiyun 	struct kvmgt_guest_info *info = container_of(node,
1773*4882a593Smuzhiyun 					struct kvmgt_guest_info, track_node);
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
1776*4882a593Smuzhiyun 	for (i = 0; i < slot->npages; i++) {
1777*4882a593Smuzhiyun 		gfn = slot->base_gfn + i;
1778*4882a593Smuzhiyun 		if (kvmgt_gfn_is_write_protected(info, gfn)) {
1779*4882a593Smuzhiyun 			kvm_slot_page_track_remove_page(kvm, slot, gfn,
1780*4882a593Smuzhiyun 						KVM_PAGE_TRACK_WRITE);
1781*4882a593Smuzhiyun 			kvmgt_protect_table_del(info, gfn);
1782*4882a593Smuzhiyun 		}
1783*4882a593Smuzhiyun 	}
1784*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
1785*4882a593Smuzhiyun }
1786*4882a593Smuzhiyun 
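/*
 * Minimal kernel-side sketch of how GVT's shadow page-table code is
 * expected to drive the page-track hooks above, through the mpt wrappers
 * (names taken from gvt/mpt.h; shown for orientation only): when a guest
 * page holds a PPGTT page table, write-protect it so every guest write
 * faults into kvmgt_page_track_write() and can be replayed into the
 * shadow table.
 *
 *	// On shadowing a guest page-table page at gfn:
 *	//	ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
 *	// On dropping the shadow:
 *	//	intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
 */
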
1787*4882a593Smuzhiyun static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1788*4882a593Smuzhiyun {
1789*4882a593Smuzhiyun 	struct intel_vgpu *itr;
1790*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
1791*4882a593Smuzhiyun 	int id;
1792*4882a593Smuzhiyun 	bool ret = false;
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	mutex_lock(&vgpu->gvt->lock);
1795*4882a593Smuzhiyun 	for_each_active_vgpu(vgpu->gvt, itr, id) {
1796*4882a593Smuzhiyun 		if (!handle_valid(itr->handle))
1797*4882a593Smuzhiyun 			continue;
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 		info = (struct kvmgt_guest_info *)itr->handle;
1800*4882a593Smuzhiyun 		if (kvm && kvm == info->kvm) {
1801*4882a593Smuzhiyun 			ret = true;
1802*4882a593Smuzhiyun 			goto out;
1803*4882a593Smuzhiyun 		}
1804*4882a593Smuzhiyun 	}
1805*4882a593Smuzhiyun out:
1806*4882a593Smuzhiyun 	mutex_unlock(&vgpu->gvt->lock);
1807*4882a593Smuzhiyun 	return ret;
1808*4882a593Smuzhiyun }
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun static int kvmgt_guest_init(struct mdev_device *mdev)
1811*4882a593Smuzhiyun {
1812*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
1813*4882a593Smuzhiyun 	struct intel_vgpu *vgpu;
1814*4882a593Smuzhiyun 	struct kvmgt_vdev *vdev;
1815*4882a593Smuzhiyun 	struct kvm *kvm;
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun 	vgpu = mdev_get_drvdata(mdev);
1818*4882a593Smuzhiyun 	if (handle_valid(vgpu->handle))
1819*4882a593Smuzhiyun 		return -EEXIST;
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	vdev = kvmgt_vdev(vgpu);
1822*4882a593Smuzhiyun 	kvm = vdev->kvm;
1823*4882a593Smuzhiyun 	if (!kvm || kvm->mm != current->mm) {
1824*4882a593Smuzhiyun 		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1825*4882a593Smuzhiyun 		return -ESRCH;
1826*4882a593Smuzhiyun 	}
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	if (__kvmgt_vgpu_exist(vgpu, kvm))
1829*4882a593Smuzhiyun 		return -EEXIST;
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	info = vzalloc(sizeof(struct kvmgt_guest_info));
1832*4882a593Smuzhiyun 	if (!info)
1833*4882a593Smuzhiyun 		return -ENOMEM;
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 	vgpu->handle = (unsigned long)info;
1836*4882a593Smuzhiyun 	info->vgpu = vgpu;
1837*4882a593Smuzhiyun 	info->kvm = kvm;
1838*4882a593Smuzhiyun 	kvm_get_kvm(info->kvm);
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	kvmgt_protect_table_init(info);
1841*4882a593Smuzhiyun 	gvt_cache_init(vgpu);
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 	info->track_node.track_write = kvmgt_page_track_write;
1844*4882a593Smuzhiyun 	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1845*4882a593Smuzhiyun 	kvm_page_track_register_notifier(kvm, &info->track_node);
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 	info->debugfs_cache_entries = debugfs_create_ulong(
1848*4882a593Smuzhiyun 						"kvmgt_nr_cache_entries",
1849*4882a593Smuzhiyun 						0444, vgpu->debugfs,
1850*4882a593Smuzhiyun 						&vdev->nr_cache_entries);
1851*4882a593Smuzhiyun 	return 0;
1852*4882a593Smuzhiyun }
1853*4882a593Smuzhiyun 
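/*
 * Illustrative userspace sketch (not part of this driver): the debugfs
 * counter created above can be polled to watch the DMA cache grow. The
 * path assumes debugfs is mounted at /sys/kernel/debug and guesses the
 * per-vGPU directory layout set up elsewhere in GVT.
 *
 *	#include <stdio.h>
 *
 *	static long read_nr_cache_entries(int vgpu_id)
 *	{
 *		char path[128];
 *		long val = -1;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/kernel/debug/gvt/vgpu%d/kvmgt_nr_cache_entries",
 *			 vgpu_id);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%ld", &val) != 1)
 *			val = -1;
 *		fclose(f);
 *		return val;
 *	}
 */
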
1854*4882a593Smuzhiyun static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1855*4882a593Smuzhiyun {
1856*4882a593Smuzhiyun 	debugfs_remove(info->debugfs_cache_entries);
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1859*4882a593Smuzhiyun 	kvm_put_kvm(info->kvm);
1860*4882a593Smuzhiyun 	kvmgt_protect_table_destroy(info);
1861*4882a593Smuzhiyun 	gvt_cache_destroy(info->vgpu);
1862*4882a593Smuzhiyun 	vfree(info);
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	return true;
1865*4882a593Smuzhiyun }
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
1868*4882a593Smuzhiyun {
1869*4882a593Smuzhiyun 	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	if (!vgpu->vdev)
1874*4882a593Smuzhiyun 		return -ENOMEM;
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	kvmgt_vdev(vgpu)->vgpu = vgpu;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	return 0;
1879*4882a593Smuzhiyun }
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun static void kvmgt_detach_vgpu(void *p_vgpu)
1882*4882a593Smuzhiyun {
1883*4882a593Smuzhiyun 	int i;
1884*4882a593Smuzhiyun 	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1885*4882a593Smuzhiyun 	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	if (!vdev->region)
1888*4882a593Smuzhiyun 		return;
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	for (i = 0; i < vdev->num_regions; i++)
1891*4882a593Smuzhiyun 		if (vdev->region[i].ops->release)
1892*4882a593Smuzhiyun 			vdev->region[i].ops->release(vgpu,
1893*4882a593Smuzhiyun 					&vdev->region[i]);
1894*4882a593Smuzhiyun 	vdev->num_regions = 0;
1895*4882a593Smuzhiyun 	kfree(vdev->region);
1896*4882a593Smuzhiyun 	vdev->region = NULL;
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 	kfree(vdev);
1899*4882a593Smuzhiyun }
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
1902*4882a593Smuzhiyun {
1903*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
1904*4882a593Smuzhiyun 	struct intel_vgpu *vgpu;
1905*4882a593Smuzhiyun 	struct kvmgt_vdev *vdev;
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 	if (!handle_valid(handle))
1908*4882a593Smuzhiyun 		return -ESRCH;
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	info = (struct kvmgt_guest_info *)handle;
1911*4882a593Smuzhiyun 	vgpu = info->vgpu;
1912*4882a593Smuzhiyun 	vdev = kvmgt_vdev(vgpu);
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	/*
1915*4882a593Smuzhiyun 	 * When the guest powers off, msi_trigger is set to NULL, but the
1916*4882a593Smuzhiyun 	 * vgpu's config space and MMIO registers are not restored to their
1917*4882a593Smuzhiyun 	 * defaults. If this vgpu is reused by the next VM, its pipes may
1918*4882a593Smuzhiyun 	 * still be enabled, so once the vgpu becomes active it can receive
1919*4882a593Smuzhiyun 	 * vblank interrupt requests. But msi_trigger stays NULL until the
1920*4882a593Smuzhiyun 	 * guest enables MSI, so if msi_trigger is NULL, return success
1921*4882a593Smuzhiyun 	 * without injecting an interrupt into the guest.
1922*4882a593Smuzhiyun 	 */
1923*4882a593Smuzhiyun 	if (vdev->msi_trigger == NULL)
1924*4882a593Smuzhiyun 		return 0;
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	if (eventfd_signal(vdev->msi_trigger, 1) == 1)
1927*4882a593Smuzhiyun 		return 0;
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 	return -EFAULT;
1930*4882a593Smuzhiyun }
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1933*4882a593Smuzhiyun {
1934*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
1935*4882a593Smuzhiyun 	kvm_pfn_t pfn;
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	if (!handle_valid(handle))
1938*4882a593Smuzhiyun 		return INTEL_GVT_INVALID_ADDR;
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 	info = (struct kvmgt_guest_info *)handle;
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	pfn = gfn_to_pfn(info->kvm, gfn);
1943*4882a593Smuzhiyun 	if (is_error_noslot_pfn(pfn))
1944*4882a593Smuzhiyun 		return INTEL_GVT_INVALID_ADDR;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	return pfn;
1947*4882a593Smuzhiyun }
1948*4882a593Smuzhiyun 
1949*4882a593Smuzhiyun static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
1950*4882a593Smuzhiyun 		unsigned long size, dma_addr_t *dma_addr)
1951*4882a593Smuzhiyun {
1952*4882a593Smuzhiyun 	struct intel_vgpu *vgpu;
1953*4882a593Smuzhiyun 	struct kvmgt_vdev *vdev;
1954*4882a593Smuzhiyun 	struct gvt_dma *entry;
1955*4882a593Smuzhiyun 	int ret;
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	if (!handle_valid(handle))
1958*4882a593Smuzhiyun 		return -EINVAL;
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
1961*4882a593Smuzhiyun 	vdev = kvmgt_vdev(vgpu);
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	mutex_lock(&vdev->cache_lock);
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	entry = __gvt_cache_find_gfn(vgpu, gfn);
1966*4882a593Smuzhiyun 	if (!entry) {
1967*4882a593Smuzhiyun 		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1968*4882a593Smuzhiyun 		if (ret)
1969*4882a593Smuzhiyun 			goto err_unlock;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1972*4882a593Smuzhiyun 		if (ret)
1973*4882a593Smuzhiyun 			goto err_unmap;
1974*4882a593Smuzhiyun 	} else if (entry->size != size) {
1975*4882a593Smuzhiyun 		/* the same gfn with different size: unmap and re-map */
1976*4882a593Smuzhiyun 		gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1977*4882a593Smuzhiyun 		__gvt_cache_remove_entry(vgpu, entry);
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1980*4882a593Smuzhiyun 		if (ret)
1981*4882a593Smuzhiyun 			goto err_unlock;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1984*4882a593Smuzhiyun 		if (ret)
1985*4882a593Smuzhiyun 			goto err_unmap;
1986*4882a593Smuzhiyun 	} else {
1987*4882a593Smuzhiyun 		kref_get(&entry->ref);
1988*4882a593Smuzhiyun 		*dma_addr = entry->dma_addr;
1989*4882a593Smuzhiyun 	}
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	mutex_unlock(&vdev->cache_lock);
1992*4882a593Smuzhiyun 	return 0;
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun err_unmap:
1995*4882a593Smuzhiyun 	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1996*4882a593Smuzhiyun err_unlock:
1997*4882a593Smuzhiyun 	mutex_unlock(&vdev->cache_lock);
1998*4882a593Smuzhiyun 	return ret;
1999*4882a593Smuzhiyun }
2000*4882a593Smuzhiyun 
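/*
 * Minimal kernel-side sketch of the cache contract implemented above
 * (wrapper names from gvt/mpt.h, shown for orientation): mapping the
 * same gfn again with the same size only takes a reference and returns
 * the cached dma_addr, so every successful map must be balanced by
 * exactly one unmap before the page is actually unpinned.
 *
 *	//	dma_addr_t daddr;
 *	//	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
 *	//						      PAGE_SIZE, &daddr);
 *	//	... program daddr into a shadow PTE ...
 *	//	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, daddr);
 */
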
2001*4882a593Smuzhiyun static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
2002*4882a593Smuzhiyun {
2003*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
2004*4882a593Smuzhiyun 	struct kvmgt_vdev *vdev;
2005*4882a593Smuzhiyun 	struct gvt_dma *entry;
2006*4882a593Smuzhiyun 	int ret = 0;
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	if (!handle_valid(handle))
2009*4882a593Smuzhiyun 		return -ENODEV;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	info = (struct kvmgt_guest_info *)handle;
2012*4882a593Smuzhiyun 	vdev = kvmgt_vdev(info->vgpu);
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 	mutex_lock(&vdev->cache_lock);
2015*4882a593Smuzhiyun 	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
2016*4882a593Smuzhiyun 	if (entry)
2017*4882a593Smuzhiyun 		kref_get(&entry->ref);
2018*4882a593Smuzhiyun 	else
2019*4882a593Smuzhiyun 		ret = -ENOMEM;
2020*4882a593Smuzhiyun 	mutex_unlock(&vdev->cache_lock);
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun 	return ret;
2023*4882a593Smuzhiyun }
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun static void __gvt_dma_release(struct kref *ref)
2026*4882a593Smuzhiyun {
2027*4882a593Smuzhiyun 	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
2030*4882a593Smuzhiyun 			   entry->size);
2031*4882a593Smuzhiyun 	__gvt_cache_remove_entry(entry->vgpu, entry);
2032*4882a593Smuzhiyun }
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
2035*4882a593Smuzhiyun {
2036*4882a593Smuzhiyun 	struct intel_vgpu *vgpu;
2037*4882a593Smuzhiyun 	struct kvmgt_vdev *vdev;
2038*4882a593Smuzhiyun 	struct gvt_dma *entry;
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	if (!handle_valid(handle))
2041*4882a593Smuzhiyun 		return;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
2044*4882a593Smuzhiyun 	vdev = kvmgt_vdev(vgpu);
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	mutex_lock(&vdev->cache_lock);
2047*4882a593Smuzhiyun 	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
2048*4882a593Smuzhiyun 	if (entry)
2049*4882a593Smuzhiyun 		kref_put(&entry->ref, __gvt_dma_release);
2050*4882a593Smuzhiyun 	mutex_unlock(&vdev->cache_lock);
2051*4882a593Smuzhiyun }
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
2054*4882a593Smuzhiyun 			void *buf, unsigned long len, bool write)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	if (!handle_valid(handle))
2059*4882a593Smuzhiyun 		return -ESRCH;
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	info = (struct kvmgt_guest_info *)handle;
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun 	return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
2064*4882a593Smuzhiyun 			   gpa, buf, len, write);
2065*4882a593Smuzhiyun }
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
2068*4882a593Smuzhiyun 			void *buf, unsigned long len)
2069*4882a593Smuzhiyun {
2070*4882a593Smuzhiyun 	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
2074*4882a593Smuzhiyun 			void *buf, unsigned long len)
2075*4882a593Smuzhiyun {
2076*4882a593Smuzhiyun 	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
2077*4882a593Smuzhiyun }
2078*4882a593Smuzhiyun 
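/*
 * Minimal kernel-side sketch (wrapper names from gvt/mpt.h, shown for
 * orientation): reading a guest PPGTT entry out of guest physical memory
 * ends up in kvmgt_read_gpa() above, which forwards to vfio_dma_rw()
 * against the vGPU's vfio group.
 *
 *	//	u64 gpte;
 *	//	ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa,
 *	//					    &gpte, sizeof(gpte));
 */
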
2079*4882a593Smuzhiyun static unsigned long kvmgt_virt_to_pfn(void *addr)
2080*4882a593Smuzhiyun {
2081*4882a593Smuzhiyun 	return PFN_DOWN(__pa(addr));
2082*4882a593Smuzhiyun }
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
2085*4882a593Smuzhiyun {
2086*4882a593Smuzhiyun 	struct kvmgt_guest_info *info;
2087*4882a593Smuzhiyun 	struct kvm *kvm;
2088*4882a593Smuzhiyun 	int idx;
2089*4882a593Smuzhiyun 	bool ret;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	if (!handle_valid(handle))
2092*4882a593Smuzhiyun 		return false;
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	info = (struct kvmgt_guest_info *)handle;
2095*4882a593Smuzhiyun 	kvm = info->kvm;
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	idx = srcu_read_lock(&kvm->srcu);
2098*4882a593Smuzhiyun 	ret = kvm_is_visible_gfn(kvm, gfn);
2099*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, idx);
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	return ret;
2102*4882a593Smuzhiyun }
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun static struct intel_gvt_mpt kvmgt_mpt = {
2105*4882a593Smuzhiyun 	.type = INTEL_GVT_HYPERVISOR_KVM,
2106*4882a593Smuzhiyun 	.host_init = kvmgt_host_init,
2107*4882a593Smuzhiyun 	.host_exit = kvmgt_host_exit,
2108*4882a593Smuzhiyun 	.attach_vgpu = kvmgt_attach_vgpu,
2109*4882a593Smuzhiyun 	.detach_vgpu = kvmgt_detach_vgpu,
2110*4882a593Smuzhiyun 	.inject_msi = kvmgt_inject_msi,
2111*4882a593Smuzhiyun 	.from_virt_to_mfn = kvmgt_virt_to_pfn,
2112*4882a593Smuzhiyun 	.enable_page_track = kvmgt_page_track_add,
2113*4882a593Smuzhiyun 	.disable_page_track = kvmgt_page_track_remove,
2114*4882a593Smuzhiyun 	.read_gpa = kvmgt_read_gpa,
2115*4882a593Smuzhiyun 	.write_gpa = kvmgt_write_gpa,
2116*4882a593Smuzhiyun 	.gfn_to_mfn = kvmgt_gfn_to_pfn,
2117*4882a593Smuzhiyun 	.dma_map_guest_page = kvmgt_dma_map_guest_page,
2118*4882a593Smuzhiyun 	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
2119*4882a593Smuzhiyun 	.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
2120*4882a593Smuzhiyun 	.set_opregion = kvmgt_set_opregion,
2121*4882a593Smuzhiyun 	.set_edid = kvmgt_set_edid,
2122*4882a593Smuzhiyun 	.get_vfio_device = kvmgt_get_vfio_device,
2123*4882a593Smuzhiyun 	.put_vfio_device = kvmgt_put_vfio_device,
2124*4882a593Smuzhiyun 	.is_valid_gfn = kvmgt_is_valid_gfn,
2125*4882a593Smuzhiyun };
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun static int __init kvmgt_init(void)
2128*4882a593Smuzhiyun {
2129*4882a593Smuzhiyun 	if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
2130*4882a593Smuzhiyun 		return -ENODEV;
2131*4882a593Smuzhiyun 	return 0;
2132*4882a593Smuzhiyun }
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun static void __exit kvmgt_exit(void)
2135*4882a593Smuzhiyun {
2136*4882a593Smuzhiyun 	intel_gvt_unregister_hypervisor();
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun module_init(kvmgt_init);
2140*4882a593Smuzhiyun module_exit(kvmgt_exit);
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun MODULE_LICENSE("GPL and additional rights");
2143*4882a593Smuzhiyun MODULE_AUTHOR("Intel Corporation");
2144