1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * The above copyright notice and this permission notice (including the next
12*4882a593Smuzhiyun * paragraph) shall be included in all copies or substantial portions of the
13*4882a593Smuzhiyun * Software.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18*4882a593Smuzhiyun * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19*4882a593Smuzhiyun * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20*4882a593Smuzhiyun * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21*4882a593Smuzhiyun * SOFTWARE.
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * Authors:
24*4882a593Smuzhiyun * Eddie Dong <eddie.dong@intel.com>
25*4882a593Smuzhiyun * Jike Song <jike.song@intel.com>
26*4882a593Smuzhiyun *
27*4882a593Smuzhiyun * Contributors:
28*4882a593Smuzhiyun * Zhi Wang <zhi.a.wang@intel.com>
29*4882a593Smuzhiyun * Min He <min.he@intel.com>
30*4882a593Smuzhiyun * Bing Niu <bing.niu@intel.com>
31*4882a593Smuzhiyun *
32*4882a593Smuzhiyun */
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #include "i915_drv.h"
35*4882a593Smuzhiyun #include "gvt.h"
36*4882a593Smuzhiyun
/* Indices into vgpu->cfg_space.bar[] for the virtual PCI BARs we track. */
enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,	/* BAR0/1: GTT + MMIO (see trap_gttmmio()) */
	INTEL_GVT_PCI_BAR_APERTURE,	/* BAR2/3: graphics aperture (see map_aperture()) */
	INTEL_GVT_PCI_BAR_PIO,		/* legacy I/O BAR — not referenced in this file */
	INTEL_GVT_PCI_BAR_MAX,
};
43*4882a593Smuzhiyun
/* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one
 * byte) byte by byte in standard pci configuration space. (not the full
 * 256 bytes.)
 *
 * Each entry is the per-byte write mask applied by vgpu_pci_cfg_mem_write():
 * a 1 bit means the guest may change that bit, a 0 bit is read-only.
 * The high byte of PCI_STATUS (mask 0xf9) is the single RW1C byte and is
 * special-cased in the write path.
 */
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
	[PCI_COMMAND] = 0xff, 0x07,
	[PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */
	[PCI_CACHE_LINE_SIZE] = 0xff,
	[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
	[PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
	[PCI_INTERRUPT_LINE] = 0xff,
};
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun /**
58*4882a593Smuzhiyun * vgpu_pci_cfg_mem_write - write virtual cfg space memory
59*4882a593Smuzhiyun * @vgpu: target vgpu
60*4882a593Smuzhiyun * @off: offset
61*4882a593Smuzhiyun * @src: src ptr to write
62*4882a593Smuzhiyun * @bytes: number of bytes
63*4882a593Smuzhiyun *
64*4882a593Smuzhiyun * Use this function to write virtual cfg space memory.
65*4882a593Smuzhiyun * For standard cfg space, only RW bits can be changed,
 * and we emulate the RW1C behavior of the PCI_STATUS register.
67*4882a593Smuzhiyun */
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
	u8 *src, unsigned int bytes)
{
	u8 *cfg_base = vgpu_cfg_space(vgpu);
	u8 mask, new, old;
	pci_power_t pwr;
	int i = 0;

	/* Bytes inside the standard cfg space covered by the RW bitmap:
	 * merge under the per-byte write mask so read-only bits survive.
	 */
	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
		mask = pci_cfg_space_rw_bmp[off + i];
		old = cfg_base[off + i];
		new = src[i] & mask;

		/**
		 * The PCI_STATUS high byte has RW1C bits, here
		 * emulates clear by writing 1 for these bits.
		 * Writing a 0b to RW1C bits has no effect.
		 */
		if (off + i == PCI_STATUS + 1)
			new = (~new & old) & mask;

		/* Keep read-only bits from the old value, take writable
		 * (or RW1C-resolved) bits from the new one.
		 */
		cfg_base[off + i] = (old & ~mask) | new;
	}

	/* For other configuration space directly copy as it is. */
	if (i < bytes)
		memcpy(cfg_base + off + i, src + i, bytes - i);

	/* Track guest D3hot entry through the PM control/status register.
	 * pmcsr_off is located by intel_vgpu_init_cfg_space(); 0 means no
	 * PM capability was found, so the second condition skips that case.
	 */
	if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
		pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off])
			& PCI_PM_CTRL_STATE_MASK);
		if (pwr == PCI_D3hot)
			vgpu->d3_entered = true;
		gvt_dbg_core("vgpu-%d power status changed to %d\n",
			     vgpu->id, pwr);
	}
}
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun /**
107*4882a593Smuzhiyun * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
108*4882a593Smuzhiyun * @vgpu: target vgpu
109*4882a593Smuzhiyun * @offset: offset
110*4882a593Smuzhiyun * @p_data: return data ptr
111*4882a593Smuzhiyun * @bytes: number of bytes to read
112*4882a593Smuzhiyun *
113*4882a593Smuzhiyun * Returns:
114*4882a593Smuzhiyun * Zero on success, negative error code if failed.
115*4882a593Smuzhiyun */
intel_vgpu_emulate_cfg_read(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)116*4882a593Smuzhiyun int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
117*4882a593Smuzhiyun void *p_data, unsigned int bytes)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun if (drm_WARN_ON(&i915->drm, bytes > 4))
122*4882a593Smuzhiyun return -EINVAL;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun if (drm_WARN_ON(&i915->drm,
125*4882a593Smuzhiyun offset + bytes > vgpu->gvt->device_info.cfg_space_size))
126*4882a593Smuzhiyun return -EINVAL;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
129*4882a593Smuzhiyun return 0;
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
/* Map (or unmap) the guest's aperture GFN range onto the host aperture MFNs
 * via the hypervisor, keeping bar[].tracked in sync with the current state.
 */
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
	unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
	u64 first_gfn;
	u64 val;
	int ret;

	/* Already in the requested state — nothing to do. */
	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	/* The low flag byte of BAR2 tells us whether the BAR is 64-bit. */
	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	/* NOTE(review): unlike trap_gttmmio(), the BAR flag bits in the low
	 * nibble are not masked off before the GFN is computed — presumably
	 * they are insignificant after the >> PAGE_SHIFT; confirm against
	 * the BAR write path.
	 */
	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  aperture_pa >> PAGE_SHIFT,
						  aperture_sz >> PAGE_SHIFT,
						  map);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}
161*4882a593Smuzhiyun
/* Install (or remove) the hypervisor trap on the guest's GTT/MMIO BAR range,
 * keeping bar[].tracked in sync with the current state.
 */
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	struct intel_vgpu_pci_bar *bar =
		&vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO];
	u8 *cfg = vgpu_cfg_space(vgpu);
	u64 start, end;
	int ret;

	/* Already in the requested state — nothing to do. */
	if (trap == bar->tracked)
		return 0;

	/* The low flag byte of BAR0 tells us whether the BAR is 64-bit. */
	if (cfg[PCI_BASE_ADDRESS_0] & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(cfg + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(cfg + PCI_BASE_ADDRESS_0);

	/* Strip the BAR flag bits to obtain the base address. */
	start &= ~GENMASK(3, 0);
	end = start + bar->size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	bar->tracked = trap;
	return 0;
}
187*4882a593Smuzhiyun
emulate_pci_command_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)188*4882a593Smuzhiyun static int emulate_pci_command_write(struct intel_vgpu *vgpu,
189*4882a593Smuzhiyun unsigned int offset, void *p_data, unsigned int bytes)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun u8 old = vgpu_cfg_space(vgpu)[offset];
192*4882a593Smuzhiyun u8 new = *(u8 *)p_data;
193*4882a593Smuzhiyun u8 changed = old ^ new;
194*4882a593Smuzhiyun int ret;
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
197*4882a593Smuzhiyun if (!(changed & PCI_COMMAND_MEMORY))
198*4882a593Smuzhiyun return 0;
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun if (old & PCI_COMMAND_MEMORY) {
201*4882a593Smuzhiyun ret = trap_gttmmio(vgpu, false);
202*4882a593Smuzhiyun if (ret)
203*4882a593Smuzhiyun return ret;
204*4882a593Smuzhiyun ret = map_aperture(vgpu, false);
205*4882a593Smuzhiyun if (ret)
206*4882a593Smuzhiyun return ret;
207*4882a593Smuzhiyun } else {
208*4882a593Smuzhiyun ret = trap_gttmmio(vgpu, true);
209*4882a593Smuzhiyun if (ret)
210*4882a593Smuzhiyun return ret;
211*4882a593Smuzhiyun ret = map_aperture(vgpu, true);
212*4882a593Smuzhiyun if (ret)
213*4882a593Smuzhiyun return ret;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun return 0;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
emulate_pci_rom_bar_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)219*4882a593Smuzhiyun static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
220*4882a593Smuzhiyun unsigned int offset, void *p_data, unsigned int bytes)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
223*4882a593Smuzhiyun u32 new = *(u32 *)(p_data);
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
226*4882a593Smuzhiyun /* We don't have rom, return size of 0. */
227*4882a593Smuzhiyun *pval = 0;
228*4882a593Smuzhiyun else
229*4882a593Smuzhiyun vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
230*4882a593Smuzhiyun return 0;
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun
/* Emulate a guest write to one of the memory BARs (BAR0/1 GTT+MMIO,
 * BAR2/3 aperture), handling both the all-ones sizing protocol and normal
 * base-address reprogramming.
 */
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	u32 new = *(u32 *)(p_data);
	/* Even 4-byte-aligned offset => low dword of a BAR pair. */
	bool lo = IS_ALIGNED(offset, 8);
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
	struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;

	/*
	 * Power-up software can determine how much address
	 * space the device requires by writing a value of
	 * all 1's to the register and then reading the value
	 * back. The device will return 0's in all don't-care
	 * address bits.
	 */
	if (new == 0xffffffff) {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			/* Expose the size mask for the GTT+MMIO BAR. */
			size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						size >> (lo ? 0 : 32), lo);
			/*
			 * Untrap the BAR, since guest hasn't configured a
			 * valid GPA
			 */
			ret = trap_gttmmio(vgpu, false);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			/* Expose the size mask for the aperture BAR. */
			size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						size >> (lo ? 0 : 32), lo);
			ret = map_aperture(vgpu, false);
			break;
		default:
			/* Unimplemented BARs */
			intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
		}
	} else {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			/*
			 * Untrap the old BAR first, since guest has
			 * re-configured the BAR
			 */
			trap_gttmmio(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			/* Re-trap at the new address only if decode is on. */
			ret = trap_gttmmio(vgpu, mmio_enabled);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			/* Same unmap-then-remap dance for the aperture. */
			map_aperture(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = map_aperture(vgpu, mmio_enabled);
			break;
		default:
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		}
	}
	return ret;
}
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun /**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
302*4882a593Smuzhiyun * @vgpu: target vgpu
303*4882a593Smuzhiyun * @offset: offset
304*4882a593Smuzhiyun * @p_data: write data ptr
305*4882a593Smuzhiyun * @bytes: number of bytes to write
306*4882a593Smuzhiyun *
307*4882a593Smuzhiyun * Returns:
308*4882a593Smuzhiyun * Zero on success, negative error code if failed.
309*4882a593Smuzhiyun */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	int ret;

	/* A single access is at most 4 bytes wide. */
	if (drm_WARN_ON(&i915->drm, bytes > 4))
		return -EINVAL;

	/* The access must lie entirely within the virtual cfg space. */
	if (drm_WARN_ON(&i915->drm,
			offset + bytes > vgpu->gvt->device_info.cfg_space_size))
		return -EINVAL;

	/* First check if it's PCI_COMMAND */
	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
		if (drm_WARN_ON(&i915->drm, bytes > 2))
			return -EINVAL;
		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
	}

	/* Dispatch on the dword-aligned register the write targets. */
	switch (rounddown(offset, 4)) {
	case PCI_ROM_ADDRESS:
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);

	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

	case INTEL_GVT_PCI_SWSCI:
		/* Software SCI trigger: forward to the OpRegion handler;
		 * note the written value is NOT stored in the shadow here.
		 */
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;
		break;

	case INTEL_GVT_PCI_OPREGION:
		/* Guest programs the OpRegion base; on success the value is
		 * also committed to the shadow cfg space.
		 */
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_opregion_base_write_handler(vgpu,
						*(u32 *)p_data);
		if (ret)
			return ret;

		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	default:
		/* Everything else goes through the masked write path. */
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	}
	return 0;
}
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun /**
367*4882a593Smuzhiyun * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
368*4882a593Smuzhiyun *
369*4882a593Smuzhiyun * @vgpu: a vGPU
370*4882a593Smuzhiyun * @primary: is the vGPU presented as primary
371*4882a593Smuzhiyun *
372*4882a593Smuzhiyun */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
	bool primary)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;
	u8 next;

	/* Start from the physical device's firmware-captured cfg space. */
	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	/* A non-primary vGPU presents itself as "other VGA" class. */
	if (!primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show guest that there isn't any stolen memory.*/
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	/* Point the aperture BAR at the host aperture base initially. */
	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	/* Start with all decode disabled; the guest enables it itself. */
	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the bar upper 32bit and let guest to assign the new value
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	/* Record real BAR sizes; used to answer the all-ones sizing probe. */
	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
		pci_resource_len(gvt->gt->i915->drm.pdev, 0);
	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
		pci_resource_len(gvt->gt->i915->drm.pdev, 2);

	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);

	/* PM Support: walk the capability list looking for the PM capability
	 * and cache the PMCSR offset so writes to it can be tracked; 0 means
	 * no PM capability was found.
	 */
	vgpu->cfg_space.pmcsr_off = 0;
	if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
		next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
		do {
			if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
				vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
				break;
			}
			next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
		} while (next);
	}
}
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun /**
431*4882a593Smuzhiyun * intel_vgpu_reset_cfg_space - reset vGPU configuration space
432*4882a593Smuzhiyun *
433*4882a593Smuzhiyun * @vgpu: a vGPU
434*4882a593Smuzhiyun *
435*4882a593Smuzhiyun */
intel_vgpu_reset_cfg_space(struct intel_vgpu * vgpu)436*4882a593Smuzhiyun void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
437*4882a593Smuzhiyun {
438*4882a593Smuzhiyun u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
439*4882a593Smuzhiyun bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
440*4882a593Smuzhiyun INTEL_GVT_PCI_CLASS_VGA_OTHER;
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun if (cmd & PCI_COMMAND_MEMORY) {
443*4882a593Smuzhiyun trap_gttmmio(vgpu, false);
444*4882a593Smuzhiyun map_aperture(vgpu, false);
445*4882a593Smuzhiyun }
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun /**
448*4882a593Smuzhiyun * Currently we only do such reset when vGPU is not
449*4882a593Smuzhiyun * owned by any VM, so we simply restore entire cfg
450*4882a593Smuzhiyun * space to default value.
451*4882a593Smuzhiyun */
452*4882a593Smuzhiyun intel_vgpu_init_cfg_space(vgpu, primary);
453*4882a593Smuzhiyun }
454