1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * The above copyright notice and this permission notice (including the next
12*4882a593Smuzhiyun * paragraph) shall be included in all copies or substantial portions of the
13*4882a593Smuzhiyun * Software.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18*4882a593Smuzhiyun * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19*4882a593Smuzhiyun * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20*4882a593Smuzhiyun * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21*4882a593Smuzhiyun * SOFTWARE.
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * Authors:
24*4882a593Smuzhiyun * Eddie Dong <eddie.dong@intel.com>
25*4882a593Smuzhiyun * Dexuan Cui
26*4882a593Smuzhiyun * Jike Song <jike.song@intel.com>
27*4882a593Smuzhiyun *
28*4882a593Smuzhiyun * Contributors:
29*4882a593Smuzhiyun * Zhi Wang <zhi.a.wang@intel.com>
30*4882a593Smuzhiyun *
31*4882a593Smuzhiyun */
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #ifndef _GVT_MPT_H_
34*4882a593Smuzhiyun #define _GVT_MPT_H_
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun /**
37*4882a593Smuzhiyun * DOC: Hypervisor Service APIs for GVT-g Core Logic
38*4882a593Smuzhiyun *
39*4882a593Smuzhiyun * This is the glue layer between specific hypervisor MPT modules and GVT-g core
40*4882a593Smuzhiyun * logic. Each kind of hypervisor MPT module provides a collection of function
41*4882a593Smuzhiyun * callbacks and will be attached to GVT host when the driver is loading.
42*4882a593Smuzhiyun * GVT-g core logic will call these APIs to request specific services from
43*4882a593Smuzhiyun * hypervisor.
44*4882a593Smuzhiyun */
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /**
47*4882a593Smuzhiyun * intel_gvt_hypervisor_host_init - init GVT-g host side
48*4882a593Smuzhiyun *
49*4882a593Smuzhiyun * Returns:
50*4882a593Smuzhiyun * Zero on success, negative error code if failed
51*4882a593Smuzhiyun */
intel_gvt_hypervisor_host_init(struct device * dev,void * gvt,const void * ops)52*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_host_init(struct device *dev,
53*4882a593Smuzhiyun void *gvt, const void *ops)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun if (!intel_gvt_host.mpt->host_init)
56*4882a593Smuzhiyun return -ENODEV;
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun return intel_gvt_host.mpt->host_init(dev, gvt, ops);
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun /**
62*4882a593Smuzhiyun * intel_gvt_hypervisor_host_exit - exit GVT-g host side
63*4882a593Smuzhiyun */
intel_gvt_hypervisor_host_exit(struct device * dev)64*4882a593Smuzhiyun static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun /* optional to provide */
67*4882a593Smuzhiyun if (!intel_gvt_host.mpt->host_exit)
68*4882a593Smuzhiyun return;
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun intel_gvt_host.mpt->host_exit(dev);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /**
74*4882a593Smuzhiyun * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
75*4882a593Smuzhiyun * related stuffs inside hypervisor.
76*4882a593Smuzhiyun *
77*4882a593Smuzhiyun * Returns:
78*4882a593Smuzhiyun * Zero on success, negative error code if failed.
79*4882a593Smuzhiyun */
intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu * vgpu)80*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun /* optional to provide */
83*4882a593Smuzhiyun if (!intel_gvt_host.mpt->attach_vgpu)
84*4882a593Smuzhiyun return 0;
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
/**
 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
 * related stuffs inside hypervisor.
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->detach_vgpu)
		return;

	intel_gvt_host.mpt->detach_vgpu(vgpu);
}
104*4882a593Smuzhiyun
/*
 * Offsets of the MSI capability fields, relative to the capability base
 * in PCI config space. Arguments are parenthesized so that expression
 * arguments (e.g. a ternary) expand with the intended precedence.
 */
#define MSI_CAP_CONTROL(offset)	((offset) + 2)
#define MSI_CAP_ADDRESS(offset)	((offset) + 4)
#define MSI_CAP_DATA(offset)	((offset) + 8)
#define MSI_CAP_EN 0x1
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun /**
111*4882a593Smuzhiyun * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
112*4882a593Smuzhiyun *
113*4882a593Smuzhiyun * Returns:
114*4882a593Smuzhiyun * Zero on success, negative error code if failed.
115*4882a593Smuzhiyun */
intel_gvt_hypervisor_inject_msi(struct intel_vgpu * vgpu)116*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
119*4882a593Smuzhiyun u16 control, data;
120*4882a593Smuzhiyun u32 addr;
121*4882a593Smuzhiyun int ret;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
124*4882a593Smuzhiyun addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
125*4882a593Smuzhiyun data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun /* Do not generate MSI if MSIEN is disable */
128*4882a593Smuzhiyun if (!(control & MSI_CAP_EN))
129*4882a593Smuzhiyun return 0;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
132*4882a593Smuzhiyun return -EINVAL;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun trace_inject_msi(vgpu->id, addr, data);
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
137*4882a593Smuzhiyun if (ret)
138*4882a593Smuzhiyun return ret;
139*4882a593Smuzhiyun return 0;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
/**
 * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
 * @p: host kernel virtual address
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
	return intel_gvt_host.mpt->from_virt_to_mfn(p);
}
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun /**
155*4882a593Smuzhiyun * intel_gvt_hypervisor_enable_page_track - track a guest page
156*4882a593Smuzhiyun * @vgpu: a vGPU
157*4882a593Smuzhiyun * @gfn: the gfn of guest
158*4882a593Smuzhiyun *
159*4882a593Smuzhiyun * Returns:
160*4882a593Smuzhiyun * Zero on success, negative error code if failed.
161*4882a593Smuzhiyun */
intel_gvt_hypervisor_enable_page_track(struct intel_vgpu * vgpu,unsigned long gfn)162*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_enable_page_track(
163*4882a593Smuzhiyun struct intel_vgpu *vgpu, unsigned long gfn)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun /**
169*4882a593Smuzhiyun * intel_gvt_hypervisor_disable_page_track - untrack a guest page
170*4882a593Smuzhiyun * @vgpu: a vGPU
171*4882a593Smuzhiyun * @gfn: the gfn of guest
172*4882a593Smuzhiyun *
173*4882a593Smuzhiyun * Returns:
174*4882a593Smuzhiyun * Zero on success, negative error code if failed.
175*4882a593Smuzhiyun */
intel_gvt_hypervisor_disable_page_track(struct intel_vgpu * vgpu,unsigned long gfn)176*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_disable_page_track(
177*4882a593Smuzhiyun struct intel_vgpu *vgpu, unsigned long gfn)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun /**
183*4882a593Smuzhiyun * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
184*4882a593Smuzhiyun * @vgpu: a vGPU
185*4882a593Smuzhiyun * @gpa: guest physical address
186*4882a593Smuzhiyun * @buf: host data buffer
187*4882a593Smuzhiyun * @len: data length
188*4882a593Smuzhiyun *
189*4882a593Smuzhiyun * Returns:
190*4882a593Smuzhiyun * Zero on success, negative error code if failed.
191*4882a593Smuzhiyun */
intel_gvt_hypervisor_read_gpa(struct intel_vgpu * vgpu,unsigned long gpa,void * buf,unsigned long len)192*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
193*4882a593Smuzhiyun unsigned long gpa, void *buf, unsigned long len)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun /**
199*4882a593Smuzhiyun * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
200*4882a593Smuzhiyun * @vgpu: a vGPU
201*4882a593Smuzhiyun * @gpa: guest physical address
202*4882a593Smuzhiyun * @buf: host data buffer
203*4882a593Smuzhiyun * @len: data length
204*4882a593Smuzhiyun *
205*4882a593Smuzhiyun * Returns:
206*4882a593Smuzhiyun * Zero on success, negative error code if failed.
207*4882a593Smuzhiyun */
intel_gvt_hypervisor_write_gpa(struct intel_vgpu * vgpu,unsigned long gpa,void * buf,unsigned long len)208*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
209*4882a593Smuzhiyun unsigned long gpa, void *buf, unsigned long len)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun /**
215*4882a593Smuzhiyun * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
216*4882a593Smuzhiyun * @vgpu: a vGPU
217*4882a593Smuzhiyun * @gpfn: guest pfn
218*4882a593Smuzhiyun *
219*4882a593Smuzhiyun * Returns:
220*4882a593Smuzhiyun * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
221*4882a593Smuzhiyun */
intel_gvt_hypervisor_gfn_to_mfn(struct intel_vgpu * vgpu,unsigned long gfn)222*4882a593Smuzhiyun static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
223*4882a593Smuzhiyun struct intel_vgpu *vgpu, unsigned long gfn)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun /**
229*4882a593Smuzhiyun * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
230*4882a593Smuzhiyun * @vgpu: a vGPU
231*4882a593Smuzhiyun * @gfn: guest pfn
232*4882a593Smuzhiyun * @size: page size
233*4882a593Smuzhiyun * @dma_addr: retrieve allocated dma addr
234*4882a593Smuzhiyun *
235*4882a593Smuzhiyun * Returns:
236*4882a593Smuzhiyun * 0 on success, negative error code if failed.
237*4882a593Smuzhiyun */
intel_gvt_hypervisor_dma_map_guest_page(struct intel_vgpu * vgpu,unsigned long gfn,unsigned long size,dma_addr_t * dma_addr)238*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_dma_map_guest_page(
239*4882a593Smuzhiyun struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
240*4882a593Smuzhiyun dma_addr_t *dma_addr)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
243*4882a593Smuzhiyun dma_addr);
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun /**
247*4882a593Smuzhiyun * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
248*4882a593Smuzhiyun * @vgpu: a vGPU
249*4882a593Smuzhiyun * @dma_addr: the mapped dma addr
250*4882a593Smuzhiyun */
intel_gvt_hypervisor_dma_unmap_guest_page(struct intel_vgpu * vgpu,dma_addr_t dma_addr)251*4882a593Smuzhiyun static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
252*4882a593Smuzhiyun struct intel_vgpu *vgpu, dma_addr_t dma_addr)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun /**
258*4882a593Smuzhiyun * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
259*4882a593Smuzhiyun * @vgpu: a vGPU
260*4882a593Smuzhiyun * @dma_addr: guest dma addr
261*4882a593Smuzhiyun *
262*4882a593Smuzhiyun * Returns:
263*4882a593Smuzhiyun * 0 on success, negative error code if failed.
264*4882a593Smuzhiyun */
265*4882a593Smuzhiyun static inline int
intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu * vgpu,dma_addr_t dma_addr)266*4882a593Smuzhiyun intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
267*4882a593Smuzhiyun dma_addr_t dma_addr)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun /**
273*4882a593Smuzhiyun * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
274*4882a593Smuzhiyun * @vgpu: a vGPU
275*4882a593Smuzhiyun * @gfn: guest PFN
276*4882a593Smuzhiyun * @mfn: host PFN
277*4882a593Smuzhiyun * @nr: amount of PFNs
278*4882a593Smuzhiyun * @map: map or unmap
279*4882a593Smuzhiyun *
280*4882a593Smuzhiyun * Returns:
281*4882a593Smuzhiyun * Zero on success, negative error code if failed.
282*4882a593Smuzhiyun */
intel_gvt_hypervisor_map_gfn_to_mfn(struct intel_vgpu * vgpu,unsigned long gfn,unsigned long mfn,unsigned int nr,bool map)283*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
284*4882a593Smuzhiyun struct intel_vgpu *vgpu, unsigned long gfn,
285*4882a593Smuzhiyun unsigned long mfn, unsigned int nr,
286*4882a593Smuzhiyun bool map)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun /* a MPT implementation could have MMIO mapped elsewhere */
289*4882a593Smuzhiyun if (!intel_gvt_host.mpt->map_gfn_to_mfn)
290*4882a593Smuzhiyun return 0;
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
293*4882a593Smuzhiyun map);
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun /**
297*4882a593Smuzhiyun * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
298*4882a593Smuzhiyun * @vgpu: a vGPU
299*4882a593Smuzhiyun * @start: the beginning of the guest physical address region
300*4882a593Smuzhiyun * @end: the end of the guest physical address region
301*4882a593Smuzhiyun * @map: map or unmap
302*4882a593Smuzhiyun *
303*4882a593Smuzhiyun * Returns:
304*4882a593Smuzhiyun * Zero on success, negative error code if failed.
305*4882a593Smuzhiyun */
intel_gvt_hypervisor_set_trap_area(struct intel_vgpu * vgpu,u64 start,u64 end,bool map)306*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_set_trap_area(
307*4882a593Smuzhiyun struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
308*4882a593Smuzhiyun {
309*4882a593Smuzhiyun /* a MPT implementation could have MMIO trapped elsewhere */
310*4882a593Smuzhiyun if (!intel_gvt_host.mpt->set_trap_area)
311*4882a593Smuzhiyun return 0;
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun /**
317*4882a593Smuzhiyun * intel_gvt_hypervisor_set_opregion - Set opregion for guest
318*4882a593Smuzhiyun * @vgpu: a vGPU
319*4882a593Smuzhiyun *
320*4882a593Smuzhiyun * Returns:
321*4882a593Smuzhiyun * Zero on success, negative error code if failed.
322*4882a593Smuzhiyun */
intel_gvt_hypervisor_set_opregion(struct intel_vgpu * vgpu)323*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
324*4882a593Smuzhiyun {
325*4882a593Smuzhiyun if (!intel_gvt_host.mpt->set_opregion)
326*4882a593Smuzhiyun return 0;
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun return intel_gvt_host.mpt->set_opregion(vgpu);
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun /**
332*4882a593Smuzhiyun * intel_gvt_hypervisor_set_edid - Set EDID region for guest
333*4882a593Smuzhiyun * @vgpu: a vGPU
334*4882a593Smuzhiyun * @port_num: display port number
335*4882a593Smuzhiyun *
336*4882a593Smuzhiyun * Returns:
337*4882a593Smuzhiyun * Zero on success, negative error code if failed.
338*4882a593Smuzhiyun */
intel_gvt_hypervisor_set_edid(struct intel_vgpu * vgpu,int port_num)339*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
340*4882a593Smuzhiyun int port_num)
341*4882a593Smuzhiyun {
342*4882a593Smuzhiyun if (!intel_gvt_host.mpt->set_edid)
343*4882a593Smuzhiyun return 0;
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun return intel_gvt_host.mpt->set_edid(vgpu, port_num);
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun /**
349*4882a593Smuzhiyun * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
350*4882a593Smuzhiyun * @vgpu: a vGPU
351*4882a593Smuzhiyun *
352*4882a593Smuzhiyun * Returns:
353*4882a593Smuzhiyun * Zero on success, negative error code if failed.
354*4882a593Smuzhiyun */
intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu * vgpu)355*4882a593Smuzhiyun static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun if (!intel_gvt_host.mpt->get_vfio_device)
358*4882a593Smuzhiyun return 0;
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun return intel_gvt_host.mpt->get_vfio_device(vgpu);
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun
/**
 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->put_vfio_device)
		return;

	intel_gvt_host.mpt->put_vfio_device(vgpu);
}
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun /**
379*4882a593Smuzhiyun * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
380*4882a593Smuzhiyun * @vgpu: a vGPU
381*4882a593Smuzhiyun * @gfn: guest PFN
382*4882a593Smuzhiyun *
383*4882a593Smuzhiyun * Returns:
384*4882a593Smuzhiyun * true on valid gfn, false on not.
385*4882a593Smuzhiyun */
intel_gvt_hypervisor_is_valid_gfn(struct intel_vgpu * vgpu,unsigned long gfn)386*4882a593Smuzhiyun static inline bool intel_gvt_hypervisor_is_valid_gfn(
387*4882a593Smuzhiyun struct intel_vgpu *vgpu, unsigned long gfn)
388*4882a593Smuzhiyun {
389*4882a593Smuzhiyun if (!intel_gvt_host.mpt->is_valid_gfn)
390*4882a593Smuzhiyun return true;
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
396*4882a593Smuzhiyun void intel_gvt_unregister_hypervisor(void);
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun #endif /* _GVT_MPT_H_ */
399