1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * The above copyright notice and this permission notice (including the next
12*4882a593Smuzhiyun * paragraph) shall be included in all copies or substantial portions of the
13*4882a593Smuzhiyun * Software.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18*4882a593Smuzhiyun * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19*4882a593Smuzhiyun * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20*4882a593Smuzhiyun * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21*4882a593Smuzhiyun * SOFTWARE.
22*4882a593Smuzhiyun */
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <linux/acpi.h>
25*4882a593Smuzhiyun #include "i915_drv.h"
26*4882a593Smuzhiyun #include "gvt.h"
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun /*
29*4882a593Smuzhiyun * Note: Only for GVT-g virtual VBT generation, other usage must
30*4882a593Smuzhiyun * not do like this.
31*4882a593Smuzhiyun */
32*4882a593Smuzhiyun #define _INTEL_BIOS_PRIVATE
33*4882a593Smuzhiyun #include "display/intel_vbt_defs.h"
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #define OPREGION_SIGNATURE "IntelGraphicsMem"
36*4882a593Smuzhiyun #define MBOX_VBT (1<<3)
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /* device handle */
39*4882a593Smuzhiyun #define DEVICE_TYPE_CRT 0x01
40*4882a593Smuzhiyun #define DEVICE_TYPE_EFP1 0x04
41*4882a593Smuzhiyun #define DEVICE_TYPE_EFP2 0x40
42*4882a593Smuzhiyun #define DEVICE_TYPE_EFP3 0x20
43*4882a593Smuzhiyun #define DEVICE_TYPE_EFP4 0x10
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun struct opregion_header {
46*4882a593Smuzhiyun u8 signature[16];
47*4882a593Smuzhiyun u32 size;
48*4882a593Smuzhiyun u32 opregion_ver;
49*4882a593Smuzhiyun u8 bios_ver[32];
50*4882a593Smuzhiyun u8 vbios_ver[16];
51*4882a593Smuzhiyun u8 driver_ver[16];
52*4882a593Smuzhiyun u32 mboxes;
53*4882a593Smuzhiyun u32 driver_model;
54*4882a593Smuzhiyun u32 pcon;
55*4882a593Smuzhiyun u8 dver[32];
56*4882a593Smuzhiyun u8 rsvd[124];
57*4882a593Smuzhiyun } __packed;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun struct bdb_data_header {
60*4882a593Smuzhiyun u8 id;
61*4882a593Smuzhiyun u16 size; /* data size */
62*4882a593Smuzhiyun } __packed;
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun /* For supporting windows guest with opregion, here hardcode the emulated
65*4882a593Smuzhiyun * bdb header version as '186', and the corresponding child_device_config
66*4882a593Smuzhiyun * length should be '33' but not '38'.
67*4882a593Smuzhiyun */
68*4882a593Smuzhiyun struct efp_child_device_config {
69*4882a593Smuzhiyun u16 handle;
70*4882a593Smuzhiyun u16 device_type;
71*4882a593Smuzhiyun u16 device_class;
72*4882a593Smuzhiyun u8 i2c_speed;
73*4882a593Smuzhiyun u8 dp_onboard_redriver; /* 158 */
74*4882a593Smuzhiyun u8 dp_ondock_redriver; /* 158 */
75*4882a593Smuzhiyun u8 hdmi_level_shifter_value:4; /* 169 */
76*4882a593Smuzhiyun u8 hdmi_max_data_rate:4; /* 204 */
77*4882a593Smuzhiyun u16 dtd_buf_ptr; /* 161 */
78*4882a593Smuzhiyun u8 edidless_efp:1; /* 161 */
79*4882a593Smuzhiyun u8 compression_enable:1; /* 198 */
80*4882a593Smuzhiyun u8 compression_method:1; /* 198 */
81*4882a593Smuzhiyun u8 ganged_edp:1; /* 202 */
82*4882a593Smuzhiyun u8 skip0:4;
83*4882a593Smuzhiyun u8 compression_structure_index:4; /* 198 */
84*4882a593Smuzhiyun u8 skip1:4;
85*4882a593Smuzhiyun u8 slave_port; /* 202 */
86*4882a593Smuzhiyun u8 skip2;
87*4882a593Smuzhiyun u8 dvo_port;
88*4882a593Smuzhiyun u8 i2c_pin; /* for add-in card */
89*4882a593Smuzhiyun u8 slave_addr; /* for add-in card */
90*4882a593Smuzhiyun u8 ddc_pin;
91*4882a593Smuzhiyun u16 edid_ptr;
92*4882a593Smuzhiyun u8 dvo_config;
93*4882a593Smuzhiyun u8 efp_docked_port:1; /* 158 */
94*4882a593Smuzhiyun u8 lane_reversal:1; /* 184 */
95*4882a593Smuzhiyun u8 onboard_lspcon:1; /* 192 */
96*4882a593Smuzhiyun u8 iboost_enable:1; /* 196 */
97*4882a593Smuzhiyun u8 hpd_invert:1; /* BXT 196 */
98*4882a593Smuzhiyun u8 slip3:3;
99*4882a593Smuzhiyun u8 hdmi_compat:1;
100*4882a593Smuzhiyun u8 dp_compat:1;
101*4882a593Smuzhiyun u8 tmds_compat:1;
102*4882a593Smuzhiyun u8 skip4:5;
103*4882a593Smuzhiyun u8 aux_channel;
104*4882a593Smuzhiyun u8 dongle_detect;
105*4882a593Smuzhiyun u8 pipe_cap:2;
106*4882a593Smuzhiyun u8 sdvo_stall:1; /* 158 */
107*4882a593Smuzhiyun u8 hpd_status:2;
108*4882a593Smuzhiyun u8 integrated_encoder:1;
109*4882a593Smuzhiyun u8 skip5:2;
110*4882a593Smuzhiyun u8 dvo_wiring;
111*4882a593Smuzhiyun u8 mipi_bridge_type; /* 171 */
112*4882a593Smuzhiyun u16 device_class_ext;
113*4882a593Smuzhiyun u8 dvo_function;
114*4882a593Smuzhiyun } __packed;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun struct vbt {
117*4882a593Smuzhiyun /* header->bdb_offset point to bdb_header offset */
118*4882a593Smuzhiyun struct vbt_header header;
119*4882a593Smuzhiyun struct bdb_header bdb_header;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun struct bdb_data_header general_features_header;
122*4882a593Smuzhiyun struct bdb_general_features general_features;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun struct bdb_data_header general_definitions_header;
125*4882a593Smuzhiyun struct bdb_general_definitions general_definitions;
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun struct efp_child_device_config child0;
128*4882a593Smuzhiyun struct efp_child_device_config child1;
129*4882a593Smuzhiyun struct efp_child_device_config child2;
130*4882a593Smuzhiyun struct efp_child_device_config child3;
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun struct bdb_data_header driver_features_header;
133*4882a593Smuzhiyun struct bdb_driver_features driver_features;
134*4882a593Smuzhiyun };
135*4882a593Smuzhiyun
virt_vbt_generation(struct vbt * v)136*4882a593Smuzhiyun static void virt_vbt_generation(struct vbt *v)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun int num_child;
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun memset(v, 0, sizeof(struct vbt));
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun v->header.signature[0] = '$';
143*4882a593Smuzhiyun v->header.signature[1] = 'V';
144*4882a593Smuzhiyun v->header.signature[2] = 'B';
145*4882a593Smuzhiyun v->header.signature[3] = 'T';
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun /* there's features depending on version! */
148*4882a593Smuzhiyun v->header.version = 155;
149*4882a593Smuzhiyun v->header.header_size = sizeof(v->header);
150*4882a593Smuzhiyun v->header.vbt_size = sizeof(struct vbt);
151*4882a593Smuzhiyun v->header.bdb_offset = offsetof(struct vbt, bdb_header);
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
154*4882a593Smuzhiyun v->bdb_header.version = 186; /* child_dev_size = 33 */
155*4882a593Smuzhiyun v->bdb_header.header_size = sizeof(v->bdb_header);
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /* general features */
160*4882a593Smuzhiyun v->general_features_header.id = BDB_GENERAL_FEATURES;
161*4882a593Smuzhiyun v->general_features_header.size = sizeof(struct bdb_general_features);
162*4882a593Smuzhiyun v->general_features.int_crt_support = 0;
163*4882a593Smuzhiyun v->general_features.int_tv_support = 0;
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun /* child device */
166*4882a593Smuzhiyun num_child = 4; /* each port has one child */
167*4882a593Smuzhiyun v->general_definitions.child_dev_size =
168*4882a593Smuzhiyun sizeof(struct efp_child_device_config);
169*4882a593Smuzhiyun v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
170*4882a593Smuzhiyun /* size will include child devices */
171*4882a593Smuzhiyun v->general_definitions_header.size =
172*4882a593Smuzhiyun sizeof(struct bdb_general_definitions) +
173*4882a593Smuzhiyun num_child * v->general_definitions.child_dev_size;
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun /* portA */
176*4882a593Smuzhiyun v->child0.handle = DEVICE_TYPE_EFP1;
177*4882a593Smuzhiyun v->child0.device_type = DEVICE_TYPE_DP;
178*4882a593Smuzhiyun v->child0.dvo_port = DVO_PORT_DPA;
179*4882a593Smuzhiyun v->child0.aux_channel = DP_AUX_A;
180*4882a593Smuzhiyun v->child0.dp_compat = true;
181*4882a593Smuzhiyun v->child0.integrated_encoder = true;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun /* portB */
184*4882a593Smuzhiyun v->child1.handle = DEVICE_TYPE_EFP2;
185*4882a593Smuzhiyun v->child1.device_type = DEVICE_TYPE_DP;
186*4882a593Smuzhiyun v->child1.dvo_port = DVO_PORT_DPB;
187*4882a593Smuzhiyun v->child1.aux_channel = DP_AUX_B;
188*4882a593Smuzhiyun v->child1.dp_compat = true;
189*4882a593Smuzhiyun v->child1.integrated_encoder = true;
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun /* portC */
192*4882a593Smuzhiyun v->child2.handle = DEVICE_TYPE_EFP3;
193*4882a593Smuzhiyun v->child2.device_type = DEVICE_TYPE_DP;
194*4882a593Smuzhiyun v->child2.dvo_port = DVO_PORT_DPC;
195*4882a593Smuzhiyun v->child2.aux_channel = DP_AUX_C;
196*4882a593Smuzhiyun v->child2.dp_compat = true;
197*4882a593Smuzhiyun v->child2.integrated_encoder = true;
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun /* portD */
200*4882a593Smuzhiyun v->child3.handle = DEVICE_TYPE_EFP4;
201*4882a593Smuzhiyun v->child3.device_type = DEVICE_TYPE_DP;
202*4882a593Smuzhiyun v->child3.dvo_port = DVO_PORT_DPD;
203*4882a593Smuzhiyun v->child3.aux_channel = DP_AUX_D;
204*4882a593Smuzhiyun v->child3.dp_compat = true;
205*4882a593Smuzhiyun v->child3.integrated_encoder = true;
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun /* driver features */
208*4882a593Smuzhiyun v->driver_features_header.id = BDB_DRIVER_FEATURES;
209*4882a593Smuzhiyun v->driver_features_header.size = sizeof(struct bdb_driver_features);
210*4882a593Smuzhiyun v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun /**
214*4882a593Smuzhiyun * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
215*4882a593Smuzhiyun * @vgpu: a vGPU
216*4882a593Smuzhiyun *
217*4882a593Smuzhiyun * Returns:
218*4882a593Smuzhiyun * Zero on success, negative error code if failed.
219*4882a593Smuzhiyun */
intel_vgpu_init_opregion(struct intel_vgpu * vgpu)220*4882a593Smuzhiyun int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun u8 *buf;
223*4882a593Smuzhiyun struct opregion_header *header;
224*4882a593Smuzhiyun struct vbt v;
225*4882a593Smuzhiyun const char opregion_signature[16] = OPREGION_SIGNATURE;
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
228*4882a593Smuzhiyun vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
229*4882a593Smuzhiyun __GFP_ZERO,
230*4882a593Smuzhiyun get_order(INTEL_GVT_OPREGION_SIZE));
231*4882a593Smuzhiyun if (!vgpu_opregion(vgpu)->va) {
232*4882a593Smuzhiyun gvt_err("fail to get memory for vgpu virt opregion\n");
233*4882a593Smuzhiyun return -ENOMEM;
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun /* emulated opregion with VBT mailbox only */
237*4882a593Smuzhiyun buf = (u8 *)vgpu_opregion(vgpu)->va;
238*4882a593Smuzhiyun header = (struct opregion_header *)buf;
239*4882a593Smuzhiyun memcpy(header->signature, opregion_signature,
240*4882a593Smuzhiyun sizeof(opregion_signature));
241*4882a593Smuzhiyun header->size = 0x8;
242*4882a593Smuzhiyun header->opregion_ver = 0x02000000;
243*4882a593Smuzhiyun header->mboxes = MBOX_VBT;
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun /* for unknown reason, the value in LID field is incorrect
246*4882a593Smuzhiyun * which block the windows guest, so workaround it by force
247*4882a593Smuzhiyun * setting it to "OPEN"
248*4882a593Smuzhiyun */
249*4882a593Smuzhiyun buf[INTEL_GVT_OPREGION_CLID] = 0x3;
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun /* emulated vbt from virt vbt generation */
252*4882a593Smuzhiyun virt_vbt_generation(&v);
253*4882a593Smuzhiyun memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun return 0;
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun
/*
 * Map (or unmap, when @map is false) every host opregion page into the
 * guest at the GFNs previously recorded in vgpu_opregion()->gfn[].
 * Returns 0 on success, negative errno on the first failing page.
 */
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
	int i;

	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
		u64 mfn;
		int ret;

		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
						       + i * PAGE_SIZE);
		if (mfn == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("fail to get MFN from VA\n");
			return -EINVAL;
		}

		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
				vgpu_opregion(vgpu)->gfn[i], mfn, 1, map);
		if (ret) {
			gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
				ret);
			return ret;
		}
	}

	vgpu_opregion(vgpu)->mapped = map;

	return 0;
}
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun /**
286*4882a593Smuzhiyun * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
287*4882a593Smuzhiyun *
288*4882a593Smuzhiyun * @vgpu: a vGPU
289*4882a593Smuzhiyun * @gpa: guest physical address of opregion
290*4882a593Smuzhiyun *
291*4882a593Smuzhiyun * Returns:
292*4882a593Smuzhiyun * Zero on success, negative error code if failed.
293*4882a593Smuzhiyun */
intel_vgpu_opregion_base_write_handler(struct intel_vgpu * vgpu,u32 gpa)294*4882a593Smuzhiyun int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun int i, ret = 0;
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun gvt_dbg_core("emulate opregion from kernel\n");
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun switch (intel_gvt_host.hypervisor_type) {
302*4882a593Smuzhiyun case INTEL_GVT_HYPERVISOR_KVM:
303*4882a593Smuzhiyun for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
304*4882a593Smuzhiyun vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
305*4882a593Smuzhiyun break;
306*4882a593Smuzhiyun case INTEL_GVT_HYPERVISOR_XEN:
307*4882a593Smuzhiyun /**
308*4882a593Smuzhiyun * Wins guest on Xengt will write this register twice: xen
309*4882a593Smuzhiyun * hvmloader and windows graphic driver.
310*4882a593Smuzhiyun */
311*4882a593Smuzhiyun if (vgpu_opregion(vgpu)->mapped)
312*4882a593Smuzhiyun map_vgpu_opregion(vgpu, false);
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
315*4882a593Smuzhiyun vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun ret = map_vgpu_opregion(vgpu, true);
318*4882a593Smuzhiyun break;
319*4882a593Smuzhiyun default:
320*4882a593Smuzhiyun ret = -EINVAL;
321*4882a593Smuzhiyun gvt_vgpu_err("not supported hypervisor\n");
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun
324*4882a593Smuzhiyun return ret;
325*4882a593Smuzhiyun }
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun /**
328*4882a593Smuzhiyun * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
329*4882a593Smuzhiyun * @vgpu: a vGPU
330*4882a593Smuzhiyun *
331*4882a593Smuzhiyun */
intel_vgpu_clean_opregion(struct intel_vgpu * vgpu)332*4882a593Smuzhiyun void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun if (!vgpu_opregion(vgpu)->va)
337*4882a593Smuzhiyun return;
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
340*4882a593Smuzhiyun if (vgpu_opregion(vgpu)->mapped)
341*4882a593Smuzhiyun map_vgpu_opregion(vgpu, false);
342*4882a593Smuzhiyun } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
343*4882a593Smuzhiyun /* Guest opregion is released by VFIO */
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun free_pages((unsigned long)vgpu_opregion(vgpu)->va,
346*4882a593Smuzhiyun get_order(INTEL_GVT_OPREGION_SIZE));
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun vgpu_opregion(vgpu)->va = NULL;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun #define GVT_OPREGION_FUNC(scic) \
354*4882a593Smuzhiyun ({ \
355*4882a593Smuzhiyun u32 __ret; \
356*4882a593Smuzhiyun __ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
357*4882a593Smuzhiyun OPREGION_SCIC_FUNC_SHIFT; \
358*4882a593Smuzhiyun __ret; \
359*4882a593Smuzhiyun })
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun #define GVT_OPREGION_SUBFUNC(scic) \
362*4882a593Smuzhiyun ({ \
363*4882a593Smuzhiyun u32 __ret; \
364*4882a593Smuzhiyun __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
365*4882a593Smuzhiyun OPREGION_SCIC_SUBFUNC_SHIFT; \
366*4882a593Smuzhiyun __ret; \
367*4882a593Smuzhiyun })
368*4882a593Smuzhiyun
/* Human-readable name of a SWSCI function code, for diagnostics. */
static const char *opregion_func_name(u32 func)
{
	switch (func) {
	case 4:
		return "Get BIOS Data";
	case 6:
		return "System BIOS Callbacks";
	case 0 ... 3:
	case 5:
	case 7 ... 15:
		return "Reserved";
	default:
		return "Unknown";
	}
}
394*4882a593Smuzhiyun
/* Human-readable name of a SWSCI subfunction code, for diagnostics. */
static const char *opregion_subfunc_name(u32 subfunc)
{
	switch (subfunc) {
	case 0:
		return "Supported Calls";
	case 1:
		return "Requested Callbacks";
	case 5:
		return "Boot Display";
	case 6:
		return "TV-Standard/Video-Connector";
	case 7:
		return "Internal Graphics";
	case 10:
		return "Spread Spectrum Clocks";
	case 11:
		return "Get AKSV";
	case 2 ... 3:
	case 8 ... 9:
		return "Reserved";
	default:
		return "Unknown";
	}
}
439*4882a593Smuzhiyun
querying_capabilities(u32 scic)440*4882a593Smuzhiyun static bool querying_capabilities(u32 scic)
441*4882a593Smuzhiyun {
442*4882a593Smuzhiyun u32 func, subfunc;
443*4882a593Smuzhiyun
444*4882a593Smuzhiyun func = GVT_OPREGION_FUNC(scic);
445*4882a593Smuzhiyun subfunc = GVT_OPREGION_SUBFUNC(scic);
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
448*4882a593Smuzhiyun subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
449*4882a593Smuzhiyun || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
450*4882a593Smuzhiyun subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
451*4882a593Smuzhiyun || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
452*4882a593Smuzhiyun subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
453*4882a593Smuzhiyun return true;
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun return false;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun /**
459*4882a593Smuzhiyun * intel_vgpu_emulate_opregion_request - emulating OpRegion request
460*4882a593Smuzhiyun * @vgpu: a vGPU
461*4882a593Smuzhiyun * @swsci: SWSCI request
462*4882a593Smuzhiyun *
463*4882a593Smuzhiyun * Returns:
464*4882a593Smuzhiyun * Zero on success, negative error code if failed
465*4882a593Smuzhiyun */
intel_vgpu_emulate_opregion_request(struct intel_vgpu * vgpu,u32 swsci)466*4882a593Smuzhiyun int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
467*4882a593Smuzhiyun {
468*4882a593Smuzhiyun u32 scic, parm;
469*4882a593Smuzhiyun u32 func, subfunc;
470*4882a593Smuzhiyun u64 scic_pa = 0, parm_pa = 0;
471*4882a593Smuzhiyun int ret;
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun switch (intel_gvt_host.hypervisor_type) {
474*4882a593Smuzhiyun case INTEL_GVT_HYPERVISOR_XEN:
475*4882a593Smuzhiyun scic = *((u32 *)vgpu_opregion(vgpu)->va +
476*4882a593Smuzhiyun INTEL_GVT_OPREGION_SCIC);
477*4882a593Smuzhiyun parm = *((u32 *)vgpu_opregion(vgpu)->va +
478*4882a593Smuzhiyun INTEL_GVT_OPREGION_PARM);
479*4882a593Smuzhiyun break;
480*4882a593Smuzhiyun case INTEL_GVT_HYPERVISOR_KVM:
481*4882a593Smuzhiyun scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
482*4882a593Smuzhiyun INTEL_GVT_OPREGION_SCIC;
483*4882a593Smuzhiyun parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
484*4882a593Smuzhiyun INTEL_GVT_OPREGION_PARM;
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
487*4882a593Smuzhiyun &scic, sizeof(scic));
488*4882a593Smuzhiyun if (ret) {
489*4882a593Smuzhiyun gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
490*4882a593Smuzhiyun ret, scic_pa, sizeof(scic));
491*4882a593Smuzhiyun return ret;
492*4882a593Smuzhiyun }
493*4882a593Smuzhiyun
494*4882a593Smuzhiyun ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
495*4882a593Smuzhiyun &parm, sizeof(parm));
496*4882a593Smuzhiyun if (ret) {
497*4882a593Smuzhiyun gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
498*4882a593Smuzhiyun ret, scic_pa, sizeof(scic));
499*4882a593Smuzhiyun return ret;
500*4882a593Smuzhiyun }
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun break;
503*4882a593Smuzhiyun default:
504*4882a593Smuzhiyun gvt_vgpu_err("not supported hypervisor\n");
505*4882a593Smuzhiyun return -EINVAL;
506*4882a593Smuzhiyun }
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun if (!(swsci & SWSCI_SCI_SELECT)) {
509*4882a593Smuzhiyun gvt_vgpu_err("requesting SMI service\n");
510*4882a593Smuzhiyun return 0;
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun /* ignore non 0->1 trasitions */
513*4882a593Smuzhiyun if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
514*4882a593Smuzhiyun & SWSCI_SCI_TRIGGER) ||
515*4882a593Smuzhiyun !(swsci & SWSCI_SCI_TRIGGER)) {
516*4882a593Smuzhiyun return 0;
517*4882a593Smuzhiyun }
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun func = GVT_OPREGION_FUNC(scic);
520*4882a593Smuzhiyun subfunc = GVT_OPREGION_SUBFUNC(scic);
521*4882a593Smuzhiyun if (!querying_capabilities(scic)) {
522*4882a593Smuzhiyun gvt_vgpu_err("requesting runtime service: func \"%s\","
523*4882a593Smuzhiyun " subfunc \"%s\"\n",
524*4882a593Smuzhiyun opregion_func_name(func),
525*4882a593Smuzhiyun opregion_subfunc_name(subfunc));
526*4882a593Smuzhiyun /*
527*4882a593Smuzhiyun * emulate exit status of function call, '0' means
528*4882a593Smuzhiyun * "failure, generic, unsupported or unknown cause"
529*4882a593Smuzhiyun */
530*4882a593Smuzhiyun scic &= ~OPREGION_SCIC_EXIT_MASK;
531*4882a593Smuzhiyun goto out;
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun
534*4882a593Smuzhiyun scic = 0;
535*4882a593Smuzhiyun parm = 0;
536*4882a593Smuzhiyun
537*4882a593Smuzhiyun out:
538*4882a593Smuzhiyun switch (intel_gvt_host.hypervisor_type) {
539*4882a593Smuzhiyun case INTEL_GVT_HYPERVISOR_XEN:
540*4882a593Smuzhiyun *((u32 *)vgpu_opregion(vgpu)->va +
541*4882a593Smuzhiyun INTEL_GVT_OPREGION_SCIC) = scic;
542*4882a593Smuzhiyun *((u32 *)vgpu_opregion(vgpu)->va +
543*4882a593Smuzhiyun INTEL_GVT_OPREGION_PARM) = parm;
544*4882a593Smuzhiyun break;
545*4882a593Smuzhiyun case INTEL_GVT_HYPERVISOR_KVM:
546*4882a593Smuzhiyun ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
547*4882a593Smuzhiyun &scic, sizeof(scic));
548*4882a593Smuzhiyun if (ret) {
549*4882a593Smuzhiyun gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
550*4882a593Smuzhiyun ret, scic_pa, sizeof(scic));
551*4882a593Smuzhiyun return ret;
552*4882a593Smuzhiyun }
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
555*4882a593Smuzhiyun &parm, sizeof(parm));
556*4882a593Smuzhiyun if (ret) {
557*4882a593Smuzhiyun gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
558*4882a593Smuzhiyun ret, scic_pa, sizeof(scic));
559*4882a593Smuzhiyun return ret;
560*4882a593Smuzhiyun }
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun break;
563*4882a593Smuzhiyun default:
564*4882a593Smuzhiyun gvt_vgpu_err("not supported hypervisor\n");
565*4882a593Smuzhiyun return -EINVAL;
566*4882a593Smuzhiyun }
567*4882a593Smuzhiyun
568*4882a593Smuzhiyun return 0;
569*4882a593Smuzhiyun }
570