/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

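/* Maximum number of vGPU instances GVT-g manages per physical GPU. */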
#define GVT_MAX_VGPU 8

struct intel_gvt_host {
	struct device *dev;
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

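/* Virtual MMIO register space of a vGPU; vreg holds the guest-visible values. */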
struct intel_vgpu_mmio {
	void *vreg;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

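/* Virtual PCI configuration space of a vGPU, including the emulated BARs. */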
struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
	u32 pmcsr_off;
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

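/* Per-vGPU virtual interrupt state. */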
struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
		       INTEL_GVT_EVENT_MAX);
};

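/* Emulated ACPI OpRegion exposed to the guest. */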
struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

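/* Virtual display state of a vGPU: emulated I2C/EDID, ports and SBI. */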
struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
};

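/* Per-vGPU scheduling control; weight is the vGPU's relative scheduling weight. */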
struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

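/* Callbacks implementing a virtual submission interface (e.g. execlist). */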
struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};

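/* Per-vGPU command submission state, tracked per engine. */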
struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct intel_context *shadow[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	union {
		u64 i915_context_pml4;
		u64 i915_context_pdps[GEN8_3LVL_PDPES];
	};
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
	struct {
		u32 lrca;
		bool valid;
		u64 ring_context_gpa;
	} last_ctx[I915_NUM_ENGINES];
};

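/* Per-vGPU instance state. Fields are protected by vgpu_lock unless noted otherwise. */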
struct intel_vgpu {
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;

	/* Both sched_data and sched_ctl can be seen as part of the global gvt
	 * scheduler structure, so the two fields below are protected by
	 * sched_lock, not vgpu_lock.
	 */
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];
	/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
	bool d3_entered;

	struct dentry *debugfs;

	/* Hypervisor-specific device state. */
	void *vdev;

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	u32 scan_nonprivbb;
};

static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
	return vgpu->vdev;
}

/* validating GM healthy status */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

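/* Host-wide accounting of graphics memory already allocated to vGPUs. */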
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

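/* Host-wide accounting of fence registers already allocated to vGPUs. */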
struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t   offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg can be accessed with an unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is in GVT's mmio save-restore list and in the hardware
 * logical context image
 */
#define F_SR_IN_CTX	(1 << 7)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
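/* Template describing a vGPU type: per-instance resources and availability. */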
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	/* GVT scope lock, protecting GVT itself and all resources not yet
	 * covered by the finer-grained locks (vgpu_lock and sched_lock).
	 */
	struct mutex lock;
	/* scheduler scope lock, protect gvt and vgpu schedule related data */
	struct mutex sched_lock;

	struct intel_gt *gt;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;

	/* service_request is only manipulated with atomic bit operations,
	 * so it does not need to be protected by the big gvt lock.
	 */
	unsigned long service_request;

	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
		u32 *tlb_mmio_offset_list;
		u32 tlb_mmio_offset_list_cnt;
		u32 *mocs_mmio_offset_list;
		u32 mocs_mmio_offset_list_cnt;
	} engine_mmio_list;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling trigger by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling trigger by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

#define gvt_to_ggtt(gvt)	((gvt)->gt->ggtt)

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  gvt_to_ggtt(gvt)->mappable_end
#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start

#define gvt_ggtt_gm_sz(gvt)	gvt_to_ggtt(gvt)->vm.total
#define gvt_ggtt_sz(gvt)	(gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

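/* Parameters used when creating a vGPU and allocating its resources. */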
struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
	u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers.
   Explicitly separate use for typed MMIO reg or real offset. */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
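/* Illustrative usage: vgpu_vreg(vgpu, offset) = val; updates the guest-visible
 * value of the register at 'offset' in the vGPU's virtual register page, while
 * vgpu_vreg_t() takes a typed i915_reg_t instead of a raw offset.
 */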

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bit 31 - bit 4,
		 * leave the bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	 ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	 ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	  (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	  ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	   (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	  (gvt_gmadr_is_aperture(gvt, gmadr) || \
	    gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
		bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* The BARs are 64 bit. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
			const char *name);
	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};


enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
	intel_runtime_pm_get(gt->uncore->rpm);
}

static inline void mmio_hw_access_post(struct intel_gt *gt)
{
	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO can be accessed by GPU commands
 */
static inline bool intel_gvt_mmio_is_cmd_accessible(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_set_cmd_accessible -
 *				mark an MMIO as accessible by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessible(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed with an unaligned address
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its higher 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}

/**
 * intel_gvt_mmio_is_sr_in_ctx -
 *		check if an MMIO has the F_SR_IN_CTX flag set
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has the F_SR_IN_CTX flag set, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_is_sr_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}

/**
 * intel_gvt_mmio_set_sr_in_ctx -
 *		mark an MMIO as being in GVT's mmio save-restore list and
 *		in the hardware logical context image
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_sr_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}

void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);


#include "trace.h"
#include "mpt.h"

#endif