/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/* A vGPU with a weight of 8 gets twice as much GPU time as a vGPU
	 * with a weight of 4 on a contended host; different vGPU types are
	 * given different weights. Legal weights range from 1 to 16.
	 * A worked example follows the type table below.
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
	/* Fixed vGPU type table */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
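
/*
 * Worked example (illustrative only, not used by the code): with
 * VGPU_MAX_WEIGHT fixed at 16, the table above assigns
 * VGPU_WEIGHT(8) = 16 / 8 = 2 to the smallest "8" type and
 * VGPU_WEIGHT(1) = 16 / 1 = 16 to the largest "1" type, so on a contended
 * host a single "1" instance is scheduled roughly eight times as much GPU
 * time as a single "8" instance.
 */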

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize vGPU type list based on available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* The vGPU type name is defined as GVTg_Vx_y, where x encodes the
	 * physical GPU generation (e.g. V4 for a BDW server, V5 for a
	 * SKL server).
	 *
	 * Depending on the physical SKU resources, we might see vGPU types
	 * like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types of
	 * vGPU can be created on the same physical GPU depending on the
	 * available resources. Each vGPU type has an "avail_instance"
	 * field to indicate how many vGPU instances can be created for
	 * that type.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT)
			return -EINVAL;

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN(gvt->gt->i915, 8))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (IS_GEN(gvt->gt->i915, 9))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/* This should depend on the maximum hw resource size, but keep
	 * the static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
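
/*
 * Hypothetical sizing example (figures are illustrative only, not taken
 * from any real SKU): if 448 MB of mappable GM, 3584 MB of hidden GM and
 * 28 fence registers remain unallocated, the "2" type above (256 MB low,
 * 1024 MB high, 4 fences) ends up with
 * avail_instance = min(min(448 / 256, 3584 / 1024), 28 / 4)
 *                = min(min(1, 3), 7) = 1.
 */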

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all runtime information will be
 * destroyed.
 *
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	vgpu->d3_entered = false;
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;

	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");

	/*
	 * Remove the vGPU from the IDR first, so that the later cleanup
	 * can judge whether the service needs to be stopped because no
	 * active vGPU is left.
	 */
	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	mutex_unlock(&gvt->lock);

	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0

/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

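/*
 * Usage sketch (illustrative only, assuming the idle vGPU is kept in
 * gvt->idle_vgpu for the scheduler, as the rest of the GVT core expects):
 * it is created once while GVT is initialized and torn down symmetrically
 * on cleanup, e.g.
 *
 *	gvt->idle_vgpu = intel_gvt_create_idle_vgpu(gvt);
 *	...
 *	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
 */
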
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);
	vgpu->d3_entered = false;

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
	else
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
				struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX current param based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu))
		/* recalculate the remaining instances for each type */
		intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	return vgpu;
}
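
/*
 * A minimal lifecycle sketch (illustrative only, error handling omitted;
 * not actual hypervisor-backend code), assuming the caller has already
 * looked up a valid type from gvt->types:
 *
 *	struct intel_vgpu *vgpu = intel_gvt_create_vgpu(gvt, type);
 *
 *	intel_gvt_activate_vgpu(vgpu);
 *	...				guest uses the vGPU
 *	intel_gvt_deactivate_vgpu(vgpu);
 *	intel_gvt_release_vgpu(vgpu);
 *	intel_gvt_destroy_vgpu(vgpu);
 */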

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to default state as when it is created. This vGPU function
 * is required both for functionality and security concerns. The ultimate goal
 * of vGPU FLR is to allow a vGPU instance to be reused by virtual machines.
 * When we assign a vGPU to a virtual machine we must issue such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU
 * Spec. Unlike the FLR, a GT reset only resets particular resources of a
 * vGPU per the reset request. The guest driver can issue a GT reset by
 * programming the virtual GDRST register to reset specific virtual GPU
 * engines or all engines.
 *
 * The parameter @dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter @engine_mask specifies the engines that need to be reset.
 * If the value ALL_ENGINES is given for @engine_mask, it means the caller
 * requests a full GT reset and we will reset all virtual GPU engines.
 * For FLR, @engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
546*4882a593Smuzhiyun gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
547*4882a593Smuzhiyun vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);
	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		if (engine_mask == ALL_ENGINES)
			intel_vgpu_invalidate_ppgtt(vgpu);
		/* fences will not be reset during virtual reset */
		if (dmlr) {
			if (!vgpu->d3_entered) {
				intel_vgpu_invalidate_ppgtt(vgpu);
				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
			}
			intel_vgpu_reset_ggtt(vgpu, true);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);

		if (dmlr) {
			intel_vgpu_reset_display(vgpu);
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode during a DMLR reset */
			vgpu->failsafe = false;
			/*
			 * PCI_D0 is set before DMLR, so reset d3_entered here
			 * once it has been consumed.
			 */
			if (vgpu->d3_entered)
				vgpu->d3_entered = false;
			else
				vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
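
/*
 * Usage sketch (illustrative only), assuming the caller already holds
 * vgpu->vgpu_lock as required above; BIT(RCS0) as the render-engine mask
 * is an assumption about the i915 engine enumeration:
 *
 *	intel_gvt_reset_vgpu_locked(vgpu, true, 0);		DMLR / FLR
 *	intel_gvt_reset_vgpu_locked(vgpu, false, BIT(RCS0));	render-engine GT reset
 */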

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}