/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

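/*
 * A gen8+ GGTT PTE keeps the page address in bits 63:12; mask off the
 * low flag bits to recover the DMA address the entry points at.
 */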
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

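/*
 * Pin the guest page backing @dma_addr through the hypervisor so the
 * mapping stays valid for as long as the dma-buf importer uses it.
 * @size is currently unused; each pin covers a single page.
 */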
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
				unsigned long size,
				dma_addr_t dma_addr)
{
	int ret = 0;

	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
		ret = -EINVAL;

	return ret;
}

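/* Drop the pin taken by vgpu_pin_dma_address(). */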
static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
				   dma_addr_t dma_addr)
{
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}

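/*
 * get_pages hook for the proxy object: read the guest's GGTT entries for
 * the framebuffer straight out of the aperture (gsm), decode each PTE
 * into a DMA address, pin every page, and populate the sg_table with one
 * page-sized entry per PTE. On a pin failure, unpin what was already
 * pinned and free the table.
 */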
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	u32 page_num;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	page_num = obj->base.size >> PAGE_SHIFT;
	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				vgpu_unpin_dma_address(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

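/*
 * put_pages hook: unpin every page pinned in vgpu_gem_get_pages() (only
 * relevant once the object has been exported as a dma-buf) and release
 * the sg_table.
 */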
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *dmabuf_obj = fb_info->obj;
		struct intel_vgpu *vgpu = dmabuf_obj->vgpu;
		int i;

		/*
		 * fb_info->size is in bytes, so it can't be used as an sg
		 * entry count; unpin one entry per page, i.e. pages->nents.
		 */
		for_each_sg(pages->sgl, sg, pages->nents, i)
			vgpu_unpin_dma_address(vgpu,
					       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

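/*
 * kref release callback: unlink the dmabuf_obj from its vgpu's list,
 * drop the vfio device reference and the idr slot, then free it. An
 * orphaned object (vgpu already gone or inactive) is simply freed.
 */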
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

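/* Reference helpers; the final put frees via dmabuf_gem_object_free(). */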
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

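/*
 * GEM release hook: detach the object from its dma-buf and drop the
 * dmabuf_obj reference, taking the dmabuf lock only while the vgpu
 * still exists.
 */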
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

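/*
 * Wrap a decoded guest framebuffer in a read-only proxy GEM object so it
 * can be exported as a dma-buf; the tiling mode is derived from the
 * guest's framebuffer modifier.
 */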
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (INTEL_GEN(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

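/* A cursor hotspot is valid only if it lies within the cursor plane. */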
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

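/*
 * Decode the vgpu's current primary or cursor plane into @info and
 * sanity-check the result: non-zero size, page-aligned start address,
 * and a GGTT range that actually belongs to this vgpu.
 */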
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

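/*
 * Find an already-exposed dmabuf_obj whose framebuffer description
 * matches @latest_info, so an unchanged plane is not exported twice.
 */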
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

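/* Find an exposed dmabuf_obj by its dmabuf id. */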
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

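/* Copy the decoded framebuffer description into the VFIO plane info. */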
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

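/*
 * Backend of the VFIO_DEVICE_QUERY_GFX_PLANE ioctl: decode the requested
 * plane, then either reuse a matching exposed dmabuf_obj or allocate,
 * register and expose a new one. The dmabuf_id written back to userspace
 * is the handle later passed to intel_vgpu_get_dmabuf().
 */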
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
					gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		/*
		 * ret still holds the dmabuf id here; report a real error
		 * (-ENODEV would be folded to 0 below) and drop the idr slot.
		 */
		ret = -EFAULT;
		goto out_free_idr;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_idr:
	idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/*
 * Backend of the VFIO_DEVICE_GET_GFX_DMABUF ioctl: associate an exposed
 * dmabuf_obj, found by the id returned from intel_vgpu_query_plane(),
 * with a newly exported dma-buf, and return an fd for it.
 */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

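/*
 * Called on vgpu teardown: orphan every dmabuf_obj (clear its vgpu
 * pointer so late kref drops see it as orphaned), release the idr slot
 * and vfio device reference, and drop the initial reference where it is
 * still held.
 */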
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}