xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/gvt/gvt.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun  * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun  * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun  * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun  * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the next
12*4882a593Smuzhiyun  * paragraph) shall be included in all copies or substantial portions of the
13*4882a593Smuzhiyun  * Software.
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18*4882a593Smuzhiyun  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19*4882a593Smuzhiyun  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20*4882a593Smuzhiyun  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21*4882a593Smuzhiyun  * SOFTWARE.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  * Authors:
24*4882a593Smuzhiyun  *    Kevin Tian <kevin.tian@intel.com>
25*4882a593Smuzhiyun  *    Eddie Dong <eddie.dong@intel.com>
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  * Contributors:
28*4882a593Smuzhiyun  *    Niu Bing <bing.niu@intel.com>
29*4882a593Smuzhiyun  *    Zhi Wang <zhi.a.wang@intel.com>
30*4882a593Smuzhiyun  *
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #include <linux/types.h>
34*4882a593Smuzhiyun #include <linux/kthread.h>
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include "i915_drv.h"
37*4882a593Smuzhiyun #include "intel_gvt.h"
38*4882a593Smuzhiyun #include "gvt.h"
39*4882a593Smuzhiyun #include <linux/vfio.h>
40*4882a593Smuzhiyun #include <linux/mdev.h>
41*4882a593Smuzhiyun 
/* Global host-side GVT state, shared with the hypervisor (MPT) modules. */
struct intel_gvt_host intel_gvt_host;

/* Printable hypervisor names indexed by INTEL_GVT_HYPERVISOR_*, for logging. */
static const char * const supported_hypervisors[] = {
	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
48*4882a593Smuzhiyun 
intel_gvt_find_vgpu_type(struct intel_gvt * gvt,const char * name)49*4882a593Smuzhiyun static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
50*4882a593Smuzhiyun 		const char *name)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun 	const char *driver_name =
53*4882a593Smuzhiyun 		dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
54*4882a593Smuzhiyun 	int i;
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	name += strlen(driver_name) + 1;
57*4882a593Smuzhiyun 	for (i = 0; i < gvt->num_types; i++) {
58*4882a593Smuzhiyun 		struct intel_vgpu_type *t = &gvt->types[i];
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 		if (!strncmp(t->name, name, sizeof(t->name)))
61*4882a593Smuzhiyun 			return t;
62*4882a593Smuzhiyun 	}
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	return NULL;
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun 
available_instances_show(struct kobject * kobj,struct device * dev,char * buf)67*4882a593Smuzhiyun static ssize_t available_instances_show(struct kobject *kobj,
68*4882a593Smuzhiyun 					struct device *dev, char *buf)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	struct intel_vgpu_type *type;
71*4882a593Smuzhiyun 	unsigned int num = 0;
72*4882a593Smuzhiyun 	void *gvt = kdev_to_i915(dev)->gvt;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
75*4882a593Smuzhiyun 	if (!type)
76*4882a593Smuzhiyun 		num = 0;
77*4882a593Smuzhiyun 	else
78*4882a593Smuzhiyun 		num = type->avail_instance;
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	return sprintf(buf, "%u\n", num);
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun 
/*
 * mdev "device_api" attribute: every GVT-g vGPU exposes the
 * vfio-pci device API.
 */
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
88*4882a593Smuzhiyun 
description_show(struct kobject * kobj,struct device * dev,char * buf)89*4882a593Smuzhiyun static ssize_t description_show(struct kobject *kobj, struct device *dev,
90*4882a593Smuzhiyun 		char *buf)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun 	struct intel_vgpu_type *type;
93*4882a593Smuzhiyun 	void *gvt = kdev_to_i915(dev)->gvt;
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
96*4882a593Smuzhiyun 	if (!type)
97*4882a593Smuzhiyun 		return 0;
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
100*4882a593Smuzhiyun 		       "fence: %d\nresolution: %s\n"
101*4882a593Smuzhiyun 		       "weight: %d\n",
102*4882a593Smuzhiyun 		       BYTES_TO_MB(type->low_gm_size),
103*4882a593Smuzhiyun 		       BYTES_TO_MB(type->high_gm_size),
104*4882a593Smuzhiyun 		       type->fence, vgpu_edid_str(type->resolution),
105*4882a593Smuzhiyun 		       type->weight);
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun 
/*
 * Read-only mdev type attributes. Each MDEV_TYPE_ATTR_RO(x) expands to
 * a struct mdev_type_attribute named mdev_type_attr_<x>, backed by the
 * corresponding <x>_show() routine above.
 */
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

/* Attribute list shared by every vGPU type's sysfs group. */
static struct attribute *gvt_type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
};
118*4882a593Smuzhiyun 
/*
 * One attribute-group slot per possible vGPU type. Populated by
 * intel_gvt_init_vgpu_type_groups() and handed out through
 * intel_get_gvt_attrs(); unused slots remain NULL.
 */
static struct attribute_group *gvt_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
122*4882a593Smuzhiyun 
/*
 * Expose the static vGPU type group table to the caller (used by the
 * hypervisor/MPT layer when registering mdev types). Always succeeds.
 */
static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
	*intel_vgpu_type_groups = gvt_vgpu_type_groups;
	return true;
}
128*4882a593Smuzhiyun 
intel_gvt_init_vgpu_type_groups(struct intel_gvt * gvt)129*4882a593Smuzhiyun static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	int i, j;
132*4882a593Smuzhiyun 	struct intel_vgpu_type *type;
133*4882a593Smuzhiyun 	struct attribute_group *group;
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	for (i = 0; i < gvt->num_types; i++) {
136*4882a593Smuzhiyun 		type = &gvt->types[i];
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
139*4882a593Smuzhiyun 		if (WARN_ON(!group))
140*4882a593Smuzhiyun 			goto unwind;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 		group->name = type->name;
143*4882a593Smuzhiyun 		group->attrs = gvt_type_attrs;
144*4882a593Smuzhiyun 		gvt_vgpu_type_groups[i] = group;
145*4882a593Smuzhiyun 	}
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	return 0;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun unwind:
150*4882a593Smuzhiyun 	for (j = 0; j < i; j++) {
151*4882a593Smuzhiyun 		group = gvt_vgpu_type_groups[j];
152*4882a593Smuzhiyun 		kfree(group);
153*4882a593Smuzhiyun 	}
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	return -ENOMEM;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt * gvt)158*4882a593Smuzhiyun static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	int i;
161*4882a593Smuzhiyun 	struct attribute_group *group;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	for (i = 0; i < gvt->num_types; i++) {
164*4882a593Smuzhiyun 		group = gvt_vgpu_type_groups[i];
165*4882a593Smuzhiyun 		gvt_vgpu_type_groups[i] = NULL;
166*4882a593Smuzhiyun 		kfree(group);
167*4882a593Smuzhiyun 	}
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun 
/*
 * Callback table handed to the hypervisor (MPT) module via
 * intel_gvt_hypervisor_host_init(); these entry points implement the
 * device-model operations (config/MMIO emulation, vGPU lifecycle,
 * plane query, dmabuf, page-track write protection, hotplug).
 */
static const struct intel_gvt_ops intel_gvt_ops = {
	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
	.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
	.vgpu_create = intel_gvt_create_vgpu,
	.vgpu_destroy = intel_gvt_destroy_vgpu,
	.vgpu_release = intel_gvt_release_vgpu,
	.vgpu_reset = intel_gvt_reset_vgpu,
	.vgpu_activate = intel_gvt_activate_vgpu,
	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
	.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
	.get_gvt_attrs = intel_get_gvt_attrs,
	.vgpu_query_plane = intel_vgpu_query_plane,
	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
	.write_protect_handler = intel_vgpu_page_track_handler,
	.emulate_hotplug = intel_vgpu_emulate_hotplug,
};
188*4882a593Smuzhiyun 
/*
 * Populate the fixed parameters of the GVT device model.
 * The constants below are device-model policy values for the supported
 * platforms; only msi_cap_offset is read back from the real PCI device.
 */
static void init_device_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = gvt->gt->i915->drm.pdev;

	info->max_support_vgpus = 8;
	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
	info->mmio_size = 2 * 1024 * 1024;	/* 2 MB MMIO region */
	info->mmio_bar = 0;			/* MMIO lives in BAR 0 */
	info->gtt_start_offset = 8 * 1024 * 1024; /* GTT starts 8 MB into the BAR */
	info->gtt_entry_size = 8;
	info->gtt_entry_size_shift = 3;		/* log2(gtt_entry_size) */
	info->gmadr_bytes_in_cmd = 8;
	info->max_surface_size = 36 * 1024 * 1024;
	info->msi_cap_offset = pdev->msi_cap;	/* taken from host PCI config */
}
205*4882a593Smuzhiyun 
gvt_service_thread(void * data)206*4882a593Smuzhiyun static int gvt_service_thread(void *data)
207*4882a593Smuzhiyun {
208*4882a593Smuzhiyun 	struct intel_gvt *gvt = (struct intel_gvt *)data;
209*4882a593Smuzhiyun 	int ret;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	gvt_dbg_core("service thread start\n");
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	while (!kthread_should_stop()) {
214*4882a593Smuzhiyun 		ret = wait_event_interruptible(gvt->service_thread_wq,
215*4882a593Smuzhiyun 				kthread_should_stop() || gvt->service_request);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 		if (kthread_should_stop())
218*4882a593Smuzhiyun 			break;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 		if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
221*4882a593Smuzhiyun 			continue;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
224*4882a593Smuzhiyun 					(void *)&gvt->service_request))
225*4882a593Smuzhiyun 			intel_gvt_emulate_vblank(gvt);
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 		if (test_bit(INTEL_GVT_REQUEST_SCHED,
228*4882a593Smuzhiyun 				(void *)&gvt->service_request) ||
229*4882a593Smuzhiyun 			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
230*4882a593Smuzhiyun 					(void *)&gvt->service_request)) {
231*4882a593Smuzhiyun 			intel_gvt_schedule(gvt);
232*4882a593Smuzhiyun 		}
233*4882a593Smuzhiyun 	}
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	return 0;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun 
/* Stop the service kthread; kthread_stop() blocks until it has exited. */
static void clean_service_thread(struct intel_gvt *gvt)
{
	kthread_stop(gvt->service_thread);
}
242*4882a593Smuzhiyun 
init_service_thread(struct intel_gvt * gvt)243*4882a593Smuzhiyun static int init_service_thread(struct intel_gvt *gvt)
244*4882a593Smuzhiyun {
245*4882a593Smuzhiyun 	init_waitqueue_head(&gvt->service_thread_wq);
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	gvt->service_thread = kthread_run(gvt_service_thread,
248*4882a593Smuzhiyun 			gvt, "gvt_service_thread");
249*4882a593Smuzhiyun 	if (IS_ERR(gvt->service_thread)) {
250*4882a593Smuzhiyun 		gvt_err("fail to start service thread.\n");
251*4882a593Smuzhiyun 		return PTR_ERR(gvt->service_thread);
252*4882a593Smuzhiyun 	}
253*4882a593Smuzhiyun 	return 0;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun /**
257*4882a593Smuzhiyun  * intel_gvt_clean_device - clean a GVT device
258*4882a593Smuzhiyun  * @i915: i915 private
259*4882a593Smuzhiyun  *
260*4882a593Smuzhiyun  * This function is called at the driver unloading stage, to free the
261*4882a593Smuzhiyun  * resources owned by a GVT device.
262*4882a593Smuzhiyun  *
263*4882a593Smuzhiyun  */
intel_gvt_clean_device(struct drm_i915_private * i915)264*4882a593Smuzhiyun void intel_gvt_clean_device(struct drm_i915_private *i915)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	if (drm_WARN_ON(&i915->drm, !gvt))
269*4882a593Smuzhiyun 		return;
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
272*4882a593Smuzhiyun 	intel_gvt_cleanup_vgpu_type_groups(gvt);
273*4882a593Smuzhiyun 	intel_gvt_clean_vgpu_types(gvt);
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	intel_gvt_debugfs_clean(gvt);
276*4882a593Smuzhiyun 	clean_service_thread(gvt);
277*4882a593Smuzhiyun 	intel_gvt_clean_cmd_parser(gvt);
278*4882a593Smuzhiyun 	intel_gvt_clean_sched_policy(gvt);
279*4882a593Smuzhiyun 	intel_gvt_clean_workload_scheduler(gvt);
280*4882a593Smuzhiyun 	intel_gvt_clean_gtt(gvt);
281*4882a593Smuzhiyun 	intel_gvt_clean_irq(gvt);
282*4882a593Smuzhiyun 	intel_gvt_free_firmware(gvt);
283*4882a593Smuzhiyun 	intel_gvt_clean_mmio_info(gvt);
284*4882a593Smuzhiyun 	idr_destroy(&gvt->vgpu_idr);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	kfree(i915->gvt);
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun /**
290*4882a593Smuzhiyun  * intel_gvt_init_device - initialize a GVT device
291*4882a593Smuzhiyun  * @i915: drm i915 private data
292*4882a593Smuzhiyun  *
293*4882a593Smuzhiyun  * This function is called at the initialization stage, to initialize
294*4882a593Smuzhiyun  * necessary GVT components.
295*4882a593Smuzhiyun  *
296*4882a593Smuzhiyun  * Returns:
297*4882a593Smuzhiyun  * Zero on success, negative error code if failed.
298*4882a593Smuzhiyun  *
299*4882a593Smuzhiyun  */
intel_gvt_init_device(struct drm_i915_private * i915)300*4882a593Smuzhiyun int intel_gvt_init_device(struct drm_i915_private *i915)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun 	struct intel_gvt *gvt;
303*4882a593Smuzhiyun 	struct intel_vgpu *vgpu;
304*4882a593Smuzhiyun 	int ret;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	if (drm_WARN_ON(&i915->drm, i915->gvt))
307*4882a593Smuzhiyun 		return -EEXIST;
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
310*4882a593Smuzhiyun 	if (!gvt)
311*4882a593Smuzhiyun 		return -ENOMEM;
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	gvt_dbg_core("init gvt device\n");
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	idr_init(&gvt->vgpu_idr);
316*4882a593Smuzhiyun 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
317*4882a593Smuzhiyun 	mutex_init(&gvt->lock);
318*4882a593Smuzhiyun 	mutex_init(&gvt->sched_lock);
319*4882a593Smuzhiyun 	gvt->gt = &i915->gt;
320*4882a593Smuzhiyun 	i915->gvt = gvt;
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun 	init_device_info(gvt);
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	ret = intel_gvt_setup_mmio_info(gvt);
325*4882a593Smuzhiyun 	if (ret)
326*4882a593Smuzhiyun 		goto out_clean_idr;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	intel_gvt_init_engine_mmio_context(gvt);
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	ret = intel_gvt_load_firmware(gvt);
331*4882a593Smuzhiyun 	if (ret)
332*4882a593Smuzhiyun 		goto out_clean_mmio_info;
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 	ret = intel_gvt_init_irq(gvt);
335*4882a593Smuzhiyun 	if (ret)
336*4882a593Smuzhiyun 		goto out_free_firmware;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	ret = intel_gvt_init_gtt(gvt);
339*4882a593Smuzhiyun 	if (ret)
340*4882a593Smuzhiyun 		goto out_clean_irq;
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	ret = intel_gvt_init_workload_scheduler(gvt);
343*4882a593Smuzhiyun 	if (ret)
344*4882a593Smuzhiyun 		goto out_clean_gtt;
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	ret = intel_gvt_init_sched_policy(gvt);
347*4882a593Smuzhiyun 	if (ret)
348*4882a593Smuzhiyun 		goto out_clean_workload_scheduler;
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	ret = intel_gvt_init_cmd_parser(gvt);
351*4882a593Smuzhiyun 	if (ret)
352*4882a593Smuzhiyun 		goto out_clean_sched_policy;
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	ret = init_service_thread(gvt);
355*4882a593Smuzhiyun 	if (ret)
356*4882a593Smuzhiyun 		goto out_clean_cmd_parser;
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	ret = intel_gvt_init_vgpu_types(gvt);
359*4882a593Smuzhiyun 	if (ret)
360*4882a593Smuzhiyun 		goto out_clean_thread;
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	ret = intel_gvt_init_vgpu_type_groups(gvt);
363*4882a593Smuzhiyun 	if (ret) {
364*4882a593Smuzhiyun 		gvt_err("failed to init vgpu type groups: %d\n", ret);
365*4882a593Smuzhiyun 		goto out_clean_types;
366*4882a593Smuzhiyun 	}
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	vgpu = intel_gvt_create_idle_vgpu(gvt);
369*4882a593Smuzhiyun 	if (IS_ERR(vgpu)) {
370*4882a593Smuzhiyun 		ret = PTR_ERR(vgpu);
371*4882a593Smuzhiyun 		gvt_err("failed to create idle vgpu\n");
372*4882a593Smuzhiyun 		goto out_clean_types;
373*4882a593Smuzhiyun 	}
374*4882a593Smuzhiyun 	gvt->idle_vgpu = vgpu;
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	intel_gvt_debugfs_init(gvt);
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	gvt_dbg_core("gvt device initialization is done\n");
379*4882a593Smuzhiyun 	intel_gvt_host.dev = &i915->drm.pdev->dev;
380*4882a593Smuzhiyun 	intel_gvt_host.initialized = true;
381*4882a593Smuzhiyun 	return 0;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun out_clean_types:
384*4882a593Smuzhiyun 	intel_gvt_clean_vgpu_types(gvt);
385*4882a593Smuzhiyun out_clean_thread:
386*4882a593Smuzhiyun 	clean_service_thread(gvt);
387*4882a593Smuzhiyun out_clean_cmd_parser:
388*4882a593Smuzhiyun 	intel_gvt_clean_cmd_parser(gvt);
389*4882a593Smuzhiyun out_clean_sched_policy:
390*4882a593Smuzhiyun 	intel_gvt_clean_sched_policy(gvt);
391*4882a593Smuzhiyun out_clean_workload_scheduler:
392*4882a593Smuzhiyun 	intel_gvt_clean_workload_scheduler(gvt);
393*4882a593Smuzhiyun out_clean_gtt:
394*4882a593Smuzhiyun 	intel_gvt_clean_gtt(gvt);
395*4882a593Smuzhiyun out_clean_irq:
396*4882a593Smuzhiyun 	intel_gvt_clean_irq(gvt);
397*4882a593Smuzhiyun out_free_firmware:
398*4882a593Smuzhiyun 	intel_gvt_free_firmware(gvt);
399*4882a593Smuzhiyun out_clean_mmio_info:
400*4882a593Smuzhiyun 	intel_gvt_clean_mmio_info(gvt);
401*4882a593Smuzhiyun out_clean_idr:
402*4882a593Smuzhiyun 	idr_destroy(&gvt->vgpu_idr);
403*4882a593Smuzhiyun 	kfree(gvt);
404*4882a593Smuzhiyun 	i915->gvt = NULL;
405*4882a593Smuzhiyun 	return ret;
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun int
intel_gvt_register_hypervisor(struct intel_gvt_mpt * m)409*4882a593Smuzhiyun intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
410*4882a593Smuzhiyun {
411*4882a593Smuzhiyun 	int ret;
412*4882a593Smuzhiyun 	void *gvt;
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	if (!intel_gvt_host.initialized)
415*4882a593Smuzhiyun 		return -ENODEV;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
418*4882a593Smuzhiyun 	    m->type != INTEL_GVT_HYPERVISOR_XEN)
419*4882a593Smuzhiyun 		return -EINVAL;
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 	/* Get a reference for device model module */
422*4882a593Smuzhiyun 	if (!try_module_get(THIS_MODULE))
423*4882a593Smuzhiyun 		return -ENODEV;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	intel_gvt_host.mpt = m;
426*4882a593Smuzhiyun 	intel_gvt_host.hypervisor_type = m->type;
427*4882a593Smuzhiyun 	gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
430*4882a593Smuzhiyun 					     &intel_gvt_ops);
431*4882a593Smuzhiyun 	if (ret < 0) {
432*4882a593Smuzhiyun 		gvt_err("Failed to init %s hypervisor module\n",
433*4882a593Smuzhiyun 			supported_hypervisors[intel_gvt_host.hypervisor_type]);
434*4882a593Smuzhiyun 		module_put(THIS_MODULE);
435*4882a593Smuzhiyun 		return -ENODEV;
436*4882a593Smuzhiyun 	}
437*4882a593Smuzhiyun 	gvt_dbg_core("Running with hypervisor %s in host mode\n",
438*4882a593Smuzhiyun 		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
439*4882a593Smuzhiyun 	return 0;
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
442*4882a593Smuzhiyun 
/*
 * Unregister the current hypervisor module: run its host_exit hook and
 * drop the module reference taken by intel_gvt_register_hypervisor().
 */
void
intel_gvt_unregister_hypervisor(void)
{
	intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
450