/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying no more error
 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events to not be seen.
 *
 * I915_RESET_UEVENT - Generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event to not be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"
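
/*
 * Illustrative only (not part of this uAPI): one way userspace might listen
 * for the uevents above is via libudev, monitoring the "drm" subsystem and
 * reading the matching property off each received device event. A minimal
 * sketch assuming libudev is available; error handling omitted.
 *
 *	#include <libudev.h>
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
 *
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *
 *	// Block on udev_monitor_get_fd(mon) with poll(), then:
 *	struct udev_device *dev = udev_monitor_receive_device(mon);
 *	const char *reset = udev_device_get_property_value(dev, I915_RESET_UEVENT);
 *	if (reset)
 *		fprintf(stderr, "GPU reset imminent\n");
 *	udev_device_unref(dev);
 */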

/*
 * i915_user_extension: Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 */
struct i915_user_extension {
	__u64 next_extension; /* Pointer to the next extension, or zero to end the chain. */
	__u32 name; /* Identifier for this extension. */
	__u32 flags; /* All undefined bits must be zero. */
	__u32 rsvd[4]; /* Reserved for future use; must be zero. */
};
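
/*
 * Illustrative only: building a two-entry extension chain. The extension
 * names used here are placeholders, not real extension identifiers.
 *
 *	struct i915_user_extension tail = {
 *		.next_extension = 0, // terminates the chain
 *		.name = SOME_EXTENSION_B, // hypothetical name
 *	};
 *	struct i915_user_extension head = {
 *		.next_extension = (__u64)(uintptr_t)&tail,
 *		.name = SOME_EXTENSION_A, // hypothetical name
 *	};
 *
 * The address of "head" is then passed through whichever uAPI struct
 * accepts an extension chain (e.g. the "extensions" field of
 * struct drm_i915_gem_mmap_offset below).
 */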

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/*
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. enum drm_i915_gem_engine_class provides a
 * classification of the role of the engine, which may be used when requesting
 * operations to be performed on a certain subset of engines, or for providing
 * information about that group.
 */
enum drm_i915_gem_engine_class {
	I915_ENGINE_CLASS_RENDER	= 0,
	I915_ENGINE_CLASS_COPY		= 1,
	I915_ENGINE_CLASS_VIDEO		= 2,
	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,

	/* should be kept compact */

	I915_ENGINE_CLASS_INVALID	= -1
};

/*
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
	__u16 engine_instance;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)

#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
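
/*
 * Illustrative only: the macros above build the perf "config" value for an
 * i915 PMU event. A minimal sketch of opening the render-engine busyness
 * counter with perf_event_open(2); the PMU "type" must first be read from
 * /sys/bus/event_source/devices/i915/type, and error handling is omitted.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type, // parsed from the sysfs file above
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	// i915 is an uncore PMU: pid must be -1 and a CPU must be given.
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	__u64 busy_ns;
 *	read(fd, &busy_ns, sizeof(busy_ns)); // cumulative busy time
 */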

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;      /* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;           /* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0). The numbers below are defined as offsets against
 * DRM_COMMAND_BASE and should be within [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to a userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC     	 	 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT     	 	 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask; nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)

#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Must be kept compact -- no holes and well documented */

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
} drm_i915_getparam_t;
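
/*
 * Illustrative only: a minimal sketch of querying one of the parameters
 * above through DRM_IOCTL_I915_GETPARAM on an already-open i915 device fd;
 * error handling omitted.
 *
 *	#include <sys/ioctl.h>
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &value,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("PCI device id: 0x%x\n", value);
 */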

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow the memory manager to be torn down and re-initialized (e.g. on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow the X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule a buffer swap at the given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};
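
/*
 * Illustrative only: a minimal sketch of creating a GEM object and mapping
 * it into the CPU address space via DRM_IOCTL_I915_GEM_MMAP; error handling
 * omitted.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 *	struct drm_i915_gem_mmap map = {
 *		.handle = create.handle,
 *		.offset = 0,
 *		.size = create.size,
 *		.flags = 0, // or I915_MMAP_WC for a write-combining mapping
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map);
 *	void *ptr = (void *)(uintptr_t)map.addr_ptr;
 */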

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

struct drm_i915_gem_mmap_offset {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;

	/**
	 * Flags for extended behaviour.
	 *
	 * It is mandatory that one of the MMAP_OFFSET types
	 * (GTT, WC, WB, UC, etc) should be included.
	 */
	__u64 flags;
#define I915_MMAP_OFFSET_GTT 0
#define I915_MMAP_OFFSET_WC  1
#define I915_MMAP_OFFSET_WB  2
#define I915_MMAP_OFFSET_UC  3

	/*
	 * Zero-terminated chain of extensions.
	 *
	 * No current extensions defined; mbz.
	 */
	__u64 extensions;
};
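
/*
 * Illustrative only: with the MMAP_OFFSET path, the ioctl returns a fake
 * offset which is then passed to a regular mmap(2) on the DRM fd. A minimal
 * sketch ("handle" and "size" assumed from an earlier GEM create); error
 * handling omitted.
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WB,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, arg.offset);
 */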

struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};
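
/*
 * Illustrative only: before CPU reads or writes through a mapping, userspace
 * typically moves the object into the appropriate domain. A minimal sketch
 * ("handle" assumed from an earlier GEM create); error handling omitted.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */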

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun struct drm_i915_gem_exec_object2 {
971*4882a593Smuzhiyun 	/**
972*4882a593Smuzhiyun 	 * User's handle for a buffer to be bound into the GTT for this
973*4882a593Smuzhiyun 	 * operation.
974*4882a593Smuzhiyun 	 */
975*4882a593Smuzhiyun 	__u32 handle;
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun 	/** Number of relocations to be performed on this buffer */
978*4882a593Smuzhiyun 	__u32 relocation_count;
979*4882a593Smuzhiyun 	/**
980*4882a593Smuzhiyun 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
981*4882a593Smuzhiyun 	 * the relocations to be performed in this buffer.
982*4882a593Smuzhiyun 	 */
983*4882a593Smuzhiyun 	__u64 relocs_ptr;
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	/** Required alignment in graphics aperture */
986*4882a593Smuzhiyun 	__u64 alignment;
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	/**
989*4882a593Smuzhiyun 	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
990*4882a593Smuzhiyun 	 * the user with the GTT offset at which this object will be pinned.
991*4882a593Smuzhiyun 	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
992*4882a593Smuzhiyun 	 * presumed_offset of the object.
993*4882a593Smuzhiyun 	 * During execbuffer2 the kernel populates it with the value of the
994*4882a593Smuzhiyun 	 * current GTT offset of the object, for future presumed_offset writes.
995*4882a593Smuzhiyun 	 */
996*4882a593Smuzhiyun 	__u64 offset;
997*4882a593Smuzhiyun 
998*4882a593Smuzhiyun #define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
999*4882a593Smuzhiyun #define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
1000*4882a593Smuzhiyun #define EXEC_OBJECT_WRITE		 (1<<2)
1001*4882a593Smuzhiyun #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
1002*4882a593Smuzhiyun #define EXEC_OBJECT_PINNED		 (1<<4)
1003*4882a593Smuzhiyun #define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
1004*4882a593Smuzhiyun /* The kernel implicitly tracks GPU activity on all GEM objects, and
1005*4882a593Smuzhiyun  * synchronises operations with outstanding rendering. This includes
1006*4882a593Smuzhiyun  * rendering on other devices if exported via dma-buf. However, sometimes
1007*4882a593Smuzhiyun  * this tracking is too coarse and the user knows better. For example,
1008*4882a593Smuzhiyun  * if the object is split into non-overlapping ranges shared between different
1009*4882a593Smuzhiyun  * clients or engines (i.e. suballocating objects), the implicit tracking
1010*4882a593Smuzhiyun  * by the kernel assumes that each operation affects the whole object rather
1011*4882a593Smuzhiyun  * than an individual range, causing needless synchronisation between clients.
1012*4882a593Smuzhiyun  * The kernel will also forgo any CPU cache flushes prior to rendering from
1013*4882a593Smuzhiyun  * the object as the client is expected to be also handling such domain
1014*4882a593Smuzhiyun  * tracking.
1015*4882a593Smuzhiyun  *
1016*4882a593Smuzhiyun  * The kernel maintains the implicit tracking in order to manage resources
1017*4882a593Smuzhiyun  * used by the GPU - this flag only disables the synchronisation prior to
1018*4882a593Smuzhiyun  * rendering with this object in this execbuf.
1019*4882a593Smuzhiyun  *
1020*4882a593Smuzhiyun  * Opting out of implicit synchronisation requires the user to do its own
1021*4882a593Smuzhiyun  * explicit tracking to avoid rendering corruption. See, for example,
1022*4882a593Smuzhiyun  * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
1023*4882a593Smuzhiyun  */
1024*4882a593Smuzhiyun #define EXEC_OBJECT_ASYNC		(1<<6)
1025*4882a593Smuzhiyun /* Request that the contents of this execobject be copied into the error
1026*4882a593Smuzhiyun  * state upon a GPU hang involving this batch for post-mortem debugging.
1027*4882a593Smuzhiyun  * These buffers are recorded in no particular order as "user" in
1028*4882a593Smuzhiyun  * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1029*4882a593Smuzhiyun  * if the kernel supports this flag.
1030*4882a593Smuzhiyun  */
1031*4882a593Smuzhiyun #define EXEC_OBJECT_CAPTURE		(1<<7)
1032*4882a593Smuzhiyun /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
1033*4882a593Smuzhiyun #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
1034*4882a593Smuzhiyun 	__u64 flags;
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	union {
1037*4882a593Smuzhiyun 		__u64 rsvd1;
1038*4882a593Smuzhiyun 		__u64 pad_to_size;
1039*4882a593Smuzhiyun 	};
1040*4882a593Smuzhiyun 	__u64 rsvd2;
1041*4882a593Smuzhiyun };
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun struct drm_i915_gem_exec_fence {
1044*4882a593Smuzhiyun 	/**
1045*4882a593Smuzhiyun 	 * User's handle for a drm_syncobj to wait on or signal.
1046*4882a593Smuzhiyun 	 */
1047*4882a593Smuzhiyun 	__u32 handle;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun #define I915_EXEC_FENCE_WAIT            (1<<0)
1050*4882a593Smuzhiyun #define I915_EXEC_FENCE_SIGNAL          (1<<1)
1051*4882a593Smuzhiyun #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
1052*4882a593Smuzhiyun 	__u32 flags;
1053*4882a593Smuzhiyun };
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun /**
1056*4882a593Smuzhiyun  * See drm_i915_gem_execbuffer_ext_timeline_fences.
1057*4882a593Smuzhiyun  */
1058*4882a593Smuzhiyun #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun /**
1061*4882a593Smuzhiyun  * This structure describes an array of drm_syncobj and associated points for
1062*4882a593Smuzhiyun  * timeline variants of drm_syncobj. It is invalid to append this structure to
1063*4882a593Smuzhiyun  * the execbuf if I915_EXEC_FENCE_ARRAY is set.
1064*4882a593Smuzhiyun  */
1065*4882a593Smuzhiyun struct drm_i915_gem_execbuffer_ext_timeline_fences {
1066*4882a593Smuzhiyun 	struct i915_user_extension base;
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	/**
1069*4882a593Smuzhiyun 	 * Number of elements in the handles_ptr & values_ptr arrays.
1070*4882a593Smuzhiyun 	 */
1071*4882a593Smuzhiyun 	__u64 fence_count;
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	/**
1074*4882a593Smuzhiyun 	 * Pointer to an array of struct drm_i915_gem_exec_fence of length
1075*4882a593Smuzhiyun 	 * fence_count.
1076*4882a593Smuzhiyun 	 */
1077*4882a593Smuzhiyun 	__u64 handles_ptr;
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 	/**
1080*4882a593Smuzhiyun 	 * Pointer to an array of u64 values of length fence_count. Values
1081*4882a593Smuzhiyun 	 * must be 0 for a binary drm_syncobj. A value of 0 for a timeline
1082*4882a593Smuzhiyun 	 * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
1083*4882a593Smuzhiyun 	 */
1084*4882a593Smuzhiyun 	__u64 values_ptr;
1085*4882a593Smuzhiyun };
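
/*
 * Illustrative sketch (not part of the uAPI): attaching timeline fences to
 * an execbuf via I915_EXEC_USE_EXTENSIONS. The syncobj handles and points
 * are assumed to have been created elsewhere (e.g. DRM_IOCTL_SYNCOBJ_CREATE),
 * and eb2 is a struct drm_i915_gem_execbuffer2 as defined below.
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *	__u64 points[2] = { wait_point, signal_point };
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base = { .name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES },
 *		.fence_count = 2,
 *		.handles_ptr = (__u64)(uintptr_t)fences,
 *		.values_ptr = (__u64)(uintptr_t)points,
 *	};
 *
 *	eb2.cliprects_ptr = (__u64)(uintptr_t)&ext;
 *	eb2.num_cliprects = 0;
 *	eb2.flags |= I915_EXEC_USE_EXTENSIONS;
 */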
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun struct drm_i915_gem_execbuffer2 {
1088*4882a593Smuzhiyun 	/**
1089*4882a593Smuzhiyun 	 * List of gem_exec_object2 structs
1090*4882a593Smuzhiyun 	 */
1091*4882a593Smuzhiyun 	__u64 buffers_ptr;
1092*4882a593Smuzhiyun 	__u32 buffer_count;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	/** Offset in the batchbuffer to start execution from. */
1095*4882a593Smuzhiyun 	__u32 batch_start_offset;
1096*4882a593Smuzhiyun 	/** Bytes used in batchbuffer from batch_start_offset */
1097*4882a593Smuzhiyun 	__u32 batch_len;
1098*4882a593Smuzhiyun 	__u32 DR1;
1099*4882a593Smuzhiyun 	__u32 DR4;
1100*4882a593Smuzhiyun 	__u32 num_cliprects;
1101*4882a593Smuzhiyun 	/**
1102*4882a593Smuzhiyun 	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
1103*4882a593Smuzhiyun 	 * & I915_EXEC_USE_EXTENSIONS are not set.
1104*4882a593Smuzhiyun 	 *
1105*4882a593Smuzhiyun 	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
1106*4882a593Smuzhiyun 	 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
1107*4882a593Smuzhiyun 	 * of the array.
1108*4882a593Smuzhiyun 	 *
1109*4882a593Smuzhiyun 	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
1110*4882a593Smuzhiyun 	 * single struct i915_user_extension and num_cliprects is 0.
1111*4882a593Smuzhiyun 	 */
1112*4882a593Smuzhiyun 	__u64 cliprects_ptr;
1113*4882a593Smuzhiyun #define I915_EXEC_RING_MASK              (0x3f)
1114*4882a593Smuzhiyun #define I915_EXEC_DEFAULT                (0<<0)
1115*4882a593Smuzhiyun #define I915_EXEC_RENDER                 (1<<0)
1116*4882a593Smuzhiyun #define I915_EXEC_BSD                    (2<<0)
1117*4882a593Smuzhiyun #define I915_EXEC_BLT                    (3<<0)
1118*4882a593Smuzhiyun #define I915_EXEC_VEBOX                  (4<<0)
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun /* Used for switching the constants addressing mode on gen4+ RENDER ring.
1121*4882a593Smuzhiyun  * Gen6+ only supports relative addressing to dynamic state (default) and
1122*4882a593Smuzhiyun  * absolute addressing.
1123*4882a593Smuzhiyun  *
1124*4882a593Smuzhiyun  * These flags are ignored for the BSD and BLT rings.
1125*4882a593Smuzhiyun  */
1126*4882a593Smuzhiyun #define I915_EXEC_CONSTANTS_MASK 	(3<<6)
1127*4882a593Smuzhiyun #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1128*4882a593Smuzhiyun #define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
1129*4882a593Smuzhiyun #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1130*4882a593Smuzhiyun 	__u64 flags;
1131*4882a593Smuzhiyun 	__u64 rsvd1; /* now used for context info */
1132*4882a593Smuzhiyun 	__u64 rsvd2;
1133*4882a593Smuzhiyun };
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun /** Resets the SO write offset registers for transform feedback on gen7. */
1136*4882a593Smuzhiyun #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun /** Request a privileged ("secure") batch buffer. Note only available for
1139*4882a593Smuzhiyun  * DRM_ROOT_ONLY | DRM_MASTER processes.
1140*4882a593Smuzhiyun  */
1141*4882a593Smuzhiyun #define I915_EXEC_SECURE		(1<<9)
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun /** Inform the kernel that the batch is and will always be pinned. This
1144*4882a593Smuzhiyun  * negates the requirement for a workaround to be performed to avoid
1145*4882a593Smuzhiyun  * an incoherent CS (such as can be found on 830/845). If this flag is
1146*4882a593Smuzhiyun  * not passed, the kernel will endeavour to make sure the batch is
1147*4882a593Smuzhiyun  * coherent with the CS before execution. If this flag is passed,
1148*4882a593Smuzhiyun  * userspace assumes the responsibility for ensuring the same.
1149*4882a593Smuzhiyun  */
1150*4882a593Smuzhiyun #define I915_EXEC_IS_PINNED		(1<<10)
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun /** Provide a hint to the kernel that the command stream and auxiliary
1153*4882a593Smuzhiyun  * state buffers already hold the correct presumed addresses and so the
1154*4882a593Smuzhiyun  * relocation process may be skipped if no buffers need to be moved in
1155*4882a593Smuzhiyun  * preparation for the execbuffer.
1156*4882a593Smuzhiyun  */
1157*4882a593Smuzhiyun #define I915_EXEC_NO_RELOC		(1<<11)
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun /** Use the reloc.handle as an index into the exec object array rather
1160*4882a593Smuzhiyun  * than as the per-file handle.
1161*4882a593Smuzhiyun  */
1162*4882a593Smuzhiyun #define I915_EXEC_HANDLE_LUT		(1<<12)
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun /** Used for switching BSD rings on the platforms with two BSD rings */
1165*4882a593Smuzhiyun #define I915_EXEC_BSD_SHIFT	 (13)
1166*4882a593Smuzhiyun #define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1167*4882a593Smuzhiyun /* default ping-pong mode */
1168*4882a593Smuzhiyun #define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1169*4882a593Smuzhiyun #define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1170*4882a593Smuzhiyun #define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun /** Tell the kernel that the batchbuffer is processed by
1173*4882a593Smuzhiyun  *  the resource streamer.
1174*4882a593Smuzhiyun  */
1175*4882a593Smuzhiyun #define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1178*4882a593Smuzhiyun  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1179*4882a593Smuzhiyun  * the batch.
1180*4882a593Smuzhiyun  *
1181*4882a593Smuzhiyun  * Returns -EINVAL if the sync_file fd cannot be found.
1182*4882a593Smuzhiyun  */
1183*4882a593Smuzhiyun #define I915_EXEC_FENCE_IN		(1<<16)
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1186*4882a593Smuzhiyun  * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1187*4882a593Smuzhiyun  * to the caller, and it should be closed after use. (The fd is a regular
1188*4882a593Smuzhiyun  * file descriptor and will be cleaned up on process termination. It holds
1189*4882a593Smuzhiyun  * a reference to the request, but nothing else.)
1190*4882a593Smuzhiyun  *
1191*4882a593Smuzhiyun  * The sync_file fd can be combined with other sync_file and passed either
1192*4882a593Smuzhiyun  * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1193*4882a593Smuzhiyun  * will only occur after this request completes), or to other devices.
1194*4882a593Smuzhiyun  *
1195*4882a593Smuzhiyun  * Using I915_EXEC_FENCE_OUT requires use of
1196*4882a593Smuzhiyun  * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1197*4882a593Smuzhiyun  * back to userspace. Failure to do so will cause the out-fence to always
1198*4882a593Smuzhiyun  * be reported as zero, and the real fence fd to be leaked.
1199*4882a593Smuzhiyun  */
1200*4882a593Smuzhiyun #define I915_EXEC_FENCE_OUT		(1<<17)
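
/*
 * Example (sketch, assuming a libdrm-style drmIoctl() wrapper): retrieving an
 * out-fence. The _WR variant of the ioctl must be used so that rsvd2 is
 * written back to userspace.
 *
 *	eb2.flags |= I915_EXEC_FENCE_OUT;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb2) == 0) {
 *		int out_fence = (int)(eb2.rsvd2 >> 32); // sync_file fd
 *		// ... wait on or pass along the fence ...
 *		close(out_fence);
 *	}
 */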
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun /*
1203*4882a593Smuzhiyun  * Traditionally the execbuf ioctl has only considered the final element in
1204*4882a593Smuzhiyun  * the execobject[] to be the executable batch. Often though, the client
1205*4882a593Smuzhiyun  * will known the batch object prior to construction and being able to place
1206*4882a593Smuzhiyun  * will know the batch object prior to construction and being able to place
1207*4882a593Smuzhiyun  * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1208*4882a593Smuzhiyun  * execobject[] as the batch instead (the default is to use the last
1209*4882a593Smuzhiyun  * element).
1210*4882a593Smuzhiyun  */
1211*4882a593Smuzhiyun #define I915_EXEC_BATCH_FIRST		(1<<18)
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1214*4882a593Smuzhiyun  * define an array of i915_gem_exec_fence structures which specify a set of
1215*4882a593Smuzhiyun  * dma fences to wait upon or signal.
1216*4882a593Smuzhiyun  */
1217*4882a593Smuzhiyun #define I915_EXEC_FENCE_ARRAY   (1<<19)
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun /*
1220*4882a593Smuzhiyun  * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1221*4882a593Smuzhiyun  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1222*4882a593Smuzhiyun  * the batch.
1223*4882a593Smuzhiyun  *
1224*4882a593Smuzhiyun  * Returns -EINVAL if the sync_file fd cannot be found.
1225*4882a593Smuzhiyun  */
1226*4882a593Smuzhiyun #define I915_EXEC_FENCE_SUBMIT		(1 << 20)
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun /*
1229*4882a593Smuzhiyun  * Setting I915_EXEC_USE_EXTENSIONS implies that
1230*4882a593Smuzhiyun  * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
1231*4882a593Smuzhiyun  * list of i915_user_extension. Each i915_user_extension node is the base of a
1232*4882a593Smuzhiyun  * larger structure. The list of supported structures are listed in the
1233*4882a593Smuzhiyun  * drm_i915_gem_execbuffer_ext enum.
1234*4882a593Smuzhiyun  */
1235*4882a593Smuzhiyun #define I915_EXEC_USE_EXTENSIONS	(1 << 21)
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1240*4882a593Smuzhiyun #define i915_execbuffer2_set_context_id(eb2, context) \
1241*4882a593Smuzhiyun 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1242*4882a593Smuzhiyun #define i915_execbuffer2_get_context_id(eb2) \
1243*4882a593Smuzhiyun 	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
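
/*
 * Example (sketch; fd, ctx_id and the exec object array are assumed to exist
 * already): submitting a batch on a specific context using the helpers above.
 *
 *	struct drm_i915_gem_execbuffer2 eb2 = { 0 };
 *
 *	eb2.buffers_ptr = (__u64)(uintptr_t)objects;
 *	eb2.buffer_count = count;
 *	eb2.batch_len = batch_bytes;
 *	eb2.flags = I915_EXEC_RENDER;
 *	i915_execbuffer2_set_context_id(eb2, ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb2);
 */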
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun struct drm_i915_gem_pin {
1246*4882a593Smuzhiyun 	/** Handle of the buffer to be pinned. */
1247*4882a593Smuzhiyun 	__u32 handle;
1248*4882a593Smuzhiyun 	__u32 pad;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	/** alignment required within the aperture */
1251*4882a593Smuzhiyun 	__u64 alignment;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	/** Returned GTT offset of the buffer. */
1254*4882a593Smuzhiyun 	__u64 offset;
1255*4882a593Smuzhiyun };
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun struct drm_i915_gem_unpin {
1258*4882a593Smuzhiyun 	/** Handle of the buffer to be unpinned. */
1259*4882a593Smuzhiyun 	__u32 handle;
1260*4882a593Smuzhiyun 	__u32 pad;
1261*4882a593Smuzhiyun };
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun struct drm_i915_gem_busy {
1264*4882a593Smuzhiyun 	/** Handle of the buffer to check for busy */
1265*4882a593Smuzhiyun 	__u32 handle;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	/** Return busy status
1268*4882a593Smuzhiyun 	 *
1269*4882a593Smuzhiyun 	 * A return of 0 implies that the object is idle (after
1270*4882a593Smuzhiyun 	 * having flushed any pending activity), and a non-zero return that
1271*4882a593Smuzhiyun 	 * the object is still in-flight on the GPU. (The GPU has not yet
1272*4882a593Smuzhiyun 	 * signaled completion for all pending requests that reference the
1273*4882a593Smuzhiyun 	 * object.) An object is guaranteed to become idle eventually (so
1274*4882a593Smuzhiyun 	 * long as no new GPU commands are executed upon it). Due to the
1275*4882a593Smuzhiyun 	 * asynchronous nature of the hardware, an object reported
1276*4882a593Smuzhiyun 	 * as busy may become idle before the ioctl is completed.
1277*4882a593Smuzhiyun 	 *
1278*4882a593Smuzhiyun 	 * Furthermore, if the object is busy, which engine is busy is only
1279*4882a593Smuzhiyun 	 * provided as a guide and only indirectly by reporting its class
1280*4882a593Smuzhiyun 	 * (there may be more than one engine in each class). There are race
1281*4882a593Smuzhiyun 	 * conditions which prevent the report of which engines are busy from
1282*4882a593Smuzhiyun 	 * being always accurate.  However, the converse is not true. If the
1283*4882a593Smuzhiyun 	 * object is idle, the result of the ioctl, that all engines are idle,
1284*4882a593Smuzhiyun 	 * is accurate.
1285*4882a593Smuzhiyun 	 *
1286*4882a593Smuzhiyun 	 * The returned dword is split into two fields to indicate both
1287*4882a593Smuzhiyun 	 * the engine classes on which the object is being read, and the
1288*4882a593Smuzhiyun 	 * engine class on which it is currently being written (if any).
1289*4882a593Smuzhiyun 	 *
1290*4882a593Smuzhiyun 	 * The low word (bits 0:15) indicates if the object is being written
1291*4882a593Smuzhiyun 	 * to by any engine (there can only be one, as the GEM implicit
1292*4882a593Smuzhiyun 	 * synchronisation rules force writes to be serialised). Only the
1293*4882a593Smuzhiyun 	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1294*4882a593Smuzhiyun 	 * 1 not 0 etc) for the last write is reported.
1295*4882a593Smuzhiyun 	 *
1296*4882a593Smuzhiyun 	 * The high word (bits 16:31) is a bitmask of which engine classes
1297*4882a593Smuzhiyun 	 * are currently reading from the object. Multiple engines may be
1298*4882a593Smuzhiyun 	 * reading from the object simultaneously.
1299*4882a593Smuzhiyun 	 *
1300*4882a593Smuzhiyun 	 * The value of each engine class is the same as specified in the
1301*4882a593Smuzhiyun 	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
1302*4882a593Smuzhiyun 	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc. Note that
1303*4882a593Smuzhiyun 	 * only the engine class is reported. Some hardware may have parallel
1304*4882a593Smuzhiyun 	 * execution engines, e.g. multiple media engines, which are
1305*4882a593Smuzhiyun 	 * mapped to the same class identifier and so are not separately
1306*4882a593Smuzhiyun 	 * reported for busyness.
1307*4882a593Smuzhiyun 	 *
1308*4882a593Smuzhiyun 	 * Caveat emptor:
1309*4882a593Smuzhiyun 	 * Only the boolean result of this query is reliable; that is whether
1310*4882a593Smuzhiyun 	 * the object is idle or busy. The report of which engines are busy
1311*4882a593Smuzhiyun 	 * should be only used as a heuristic.
1312*4882a593Smuzhiyun 	 */
1313*4882a593Smuzhiyun 	__u32 busy;
1314*4882a593Smuzhiyun };
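
/*
 * Example (sketch): decoding the busy dword. Only the idle/busy boolean is
 * reliable; the per-class breakdown is a heuristic, as described above.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (busy.busy) {
 *		__u16 write_class = busy.busy & 0xffff; // engine class + 1, or 0
 *		__u16 read_mask = busy.busy >> 16;      // bitmask of engine classes
 *		// e.g. read_mask & (1u << I915_ENGINE_CLASS_RENDER)
 *	}
 */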
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun /**
1317*4882a593Smuzhiyun  * I915_CACHING_NONE
1318*4882a593Smuzhiyun  *
1319*4882a593Smuzhiyun  * GPU access is not coherent with cpu caches. Default for machines without an
1320*4882a593Smuzhiyun  * LLC.
1321*4882a593Smuzhiyun  */
1322*4882a593Smuzhiyun #define I915_CACHING_NONE		0
1323*4882a593Smuzhiyun /**
1324*4882a593Smuzhiyun  * I915_CACHING_CACHED
1325*4882a593Smuzhiyun  *
1326*4882a593Smuzhiyun  * GPU access is coherent with cpu caches and furthermore the data is cached in
1327*4882a593Smuzhiyun  * last-level caches shared between cpu cores and the gpu GT. Default on
1328*4882a593Smuzhiyun  * machines with HAS_LLC.
1329*4882a593Smuzhiyun  */
1330*4882a593Smuzhiyun #define I915_CACHING_CACHED		1
1331*4882a593Smuzhiyun /**
1332*4882a593Smuzhiyun  * I915_CACHING_DISPLAY
1333*4882a593Smuzhiyun  *
1334*4882a593Smuzhiyun  * Special GPU caching mode which is coherent with the scanout engines.
1335*4882a593Smuzhiyun  * Transparently falls back to I915_CACHING_NONE on platforms where no special
1336*4882a593Smuzhiyun  * cache mode (like write-through or gfdt flushing) is available. The kernel
1337*4882a593Smuzhiyun  * automatically sets this mode when using a buffer as a scanout target.
1338*4882a593Smuzhiyun  * Userspace can manually set this mode to avoid a costly stall and clflush in
1339*4882a593Smuzhiyun  * the hotpath of drawing the first frame.
1340*4882a593Smuzhiyun  */
1341*4882a593Smuzhiyun #define I915_CACHING_DISPLAY		2
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun struct drm_i915_gem_caching {
1344*4882a593Smuzhiyun 	/**
1345*4882a593Smuzhiyun 	 * Handle of the buffer to set/get the caching level of. */
1346*4882a593Smuzhiyun 	__u32 handle;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	/**
1349*4882a593Smuzhiyun 	 * Caching level to apply or the returned value
1350*4882a593Smuzhiyun 	 *
1351*4882a593Smuzhiyun 	 * bits0-15 are for generic caching control (i.e. the above defined
1352*4882a593Smuzhiyun 	 * values). bits16-31 are reserved for platform-specific variations
1353*4882a593Smuzhiyun 	 * (e.g. l3$ caching on gen7). */
1354*4882a593Smuzhiyun 	__u32 caching;
1355*4882a593Smuzhiyun };
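
/*
 * Example (sketch): requesting LLC caching for a buffer and reading the
 * effective level back.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 *	arg.caching = 0;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *	// arg.caching now holds the current caching level
 */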
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun #define I915_TILING_NONE	0
1358*4882a593Smuzhiyun #define I915_TILING_X		1
1359*4882a593Smuzhiyun #define I915_TILING_Y		2
1360*4882a593Smuzhiyun #define I915_TILING_LAST	I915_TILING_Y
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_NONE		0
1363*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_9		1
1364*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_9_10		2
1365*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_9_11		3
1366*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_9_10_11	4
1367*4882a593Smuzhiyun /* Not seen by userland */
1368*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_UNKNOWN	5
1369*4882a593Smuzhiyun /* Seen by userland. */
1370*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_9_17		6
1371*4882a593Smuzhiyun #define I915_BIT_6_SWIZZLE_9_10_17	7
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun struct drm_i915_gem_set_tiling {
1374*4882a593Smuzhiyun 	/** Handle of the buffer to have its tiling state updated */
1375*4882a593Smuzhiyun 	__u32 handle;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	/**
1378*4882a593Smuzhiyun 	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1379*4882a593Smuzhiyun 	 * I915_TILING_Y).
1380*4882a593Smuzhiyun 	 *
1381*4882a593Smuzhiyun 	 * This value is to be set on request, and will be updated by the
1382*4882a593Smuzhiyun 	 * kernel on successful return with the actual chosen tiling layout.
1383*4882a593Smuzhiyun 	 *
1384*4882a593Smuzhiyun 	 * The tiling mode may be demoted to I915_TILING_NONE when the system
1385*4882a593Smuzhiyun 	 * has bit 6 swizzling that can't be managed correctly by GEM.
1386*4882a593Smuzhiyun 	 *
1387*4882a593Smuzhiyun 	 * Buffer contents become undefined when changing tiling_mode.
1388*4882a593Smuzhiyun 	 */
1389*4882a593Smuzhiyun 	__u32 tiling_mode;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	/**
1392*4882a593Smuzhiyun 	 * Stride in bytes for the object when in I915_TILING_X or
1393*4882a593Smuzhiyun 	 * I915_TILING_Y.
1394*4882a593Smuzhiyun 	 */
1395*4882a593Smuzhiyun 	__u32 stride;
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	/**
1398*4882a593Smuzhiyun 	 * Returned address bit 6 swizzling required for CPU access through
1399*4882a593Smuzhiyun 	 * mmap mapping.
1400*4882a593Smuzhiyun 	 */
1401*4882a593Smuzhiyun 	__u32 swizzle_mode;
1402*4882a593Smuzhiyun };
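
/*
 * Example (sketch): requesting X-tiling. The kernel may demote the request,
 * so tiling_mode must be re-checked on return.
 *
 *	struct drm_i915_gem_set_tiling st = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride_bytes,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) == 0 &&
 *	    st.tiling_mode != I915_TILING_X) {
 *		// demoted (e.g. unmanageable bit 6 swizzling); use linear
 *	}
 */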
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun struct drm_i915_gem_get_tiling {
1405*4882a593Smuzhiyun 	/** Handle of the buffer to get tiling state for. */
1406*4882a593Smuzhiyun 	__u32 handle;
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	/**
1409*4882a593Smuzhiyun 	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1410*4882a593Smuzhiyun 	 * I915_TILING_Y).
1411*4882a593Smuzhiyun 	 */
1412*4882a593Smuzhiyun 	__u32 tiling_mode;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	/**
1415*4882a593Smuzhiyun 	 * Returned address bit 6 swizzling required for CPU access through
1416*4882a593Smuzhiyun 	 * mmap mapping.
1417*4882a593Smuzhiyun 	 */
1418*4882a593Smuzhiyun 	__u32 swizzle_mode;
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/**
1421*4882a593Smuzhiyun 	 * Returned address bit 6 swizzling required for CPU access through
1422*4882a593Smuzhiyun 	 * mmap mapping whilst bound.
1423*4882a593Smuzhiyun 	 */
1424*4882a593Smuzhiyun 	__u32 phys_swizzle_mode;
1425*4882a593Smuzhiyun };
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun struct drm_i915_gem_get_aperture {
1428*4882a593Smuzhiyun 	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1429*4882a593Smuzhiyun 	__u64 aper_size;
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	/**
1432*4882a593Smuzhiyun 	 * Available space in the aperture used by i915_gem_execbuffer, in
1433*4882a593Smuzhiyun 	 * bytes
1434*4882a593Smuzhiyun 	 */
1435*4882a593Smuzhiyun 	__u64 aper_available_size;
1436*4882a593Smuzhiyun };
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun struct drm_i915_get_pipe_from_crtc_id {
1439*4882a593Smuzhiyun 	/** ID of CRTC being requested */
1440*4882a593Smuzhiyun 	__u32 crtc_id;
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	/** pipe of requested CRTC */
1443*4882a593Smuzhiyun 	__u32 pipe;
1444*4882a593Smuzhiyun };
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun #define I915_MADV_WILLNEED 0
1447*4882a593Smuzhiyun #define I915_MADV_DONTNEED 1
1448*4882a593Smuzhiyun #define __I915_MADV_PURGED 2 /* internal state */
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun struct drm_i915_gem_madvise {
1451*4882a593Smuzhiyun 	/** Handle of the buffer to change the backing store advice */
1452*4882a593Smuzhiyun 	__u32 handle;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	/* Advice: either the buffer will be needed again in the near future,
1455*4882a593Smuzhiyun 	 *         or won't be and could be discarded under memory pressure.
1456*4882a593Smuzhiyun 	 */
1457*4882a593Smuzhiyun 	__u32 madv;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	/** Whether the backing store still exists. */
1460*4882a593Smuzhiyun 	__u32 retained;
1461*4882a593Smuzhiyun };
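
/*
 * Example (sketch): marking a buffer purgeable while cached, then checking
 * whether its backing store survived before reuse.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	// ... later, before reusing the buffer ...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained) {
 *		// contents were discarded under memory pressure; re-fill
 *	}
 */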
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun /* flags */
1464*4882a593Smuzhiyun #define I915_OVERLAY_TYPE_MASK 		0xff
1465*4882a593Smuzhiyun #define I915_OVERLAY_YUV_PLANAR 	0x01
1466*4882a593Smuzhiyun #define I915_OVERLAY_YUV_PACKED 	0x02
1467*4882a593Smuzhiyun #define I915_OVERLAY_RGB		0x03
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun #define I915_OVERLAY_DEPTH_MASK		0xff00
1470*4882a593Smuzhiyun #define I915_OVERLAY_RGB24		0x1000
1471*4882a593Smuzhiyun #define I915_OVERLAY_RGB16		0x2000
1472*4882a593Smuzhiyun #define I915_OVERLAY_RGB15		0x3000
1473*4882a593Smuzhiyun #define I915_OVERLAY_YUV422		0x0100
1474*4882a593Smuzhiyun #define I915_OVERLAY_YUV411		0x0200
1475*4882a593Smuzhiyun #define I915_OVERLAY_YUV420		0x0300
1476*4882a593Smuzhiyun #define I915_OVERLAY_YUV410		0x0400
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun #define I915_OVERLAY_SWAP_MASK		0xff0000
1479*4882a593Smuzhiyun #define I915_OVERLAY_NO_SWAP		0x000000
1480*4882a593Smuzhiyun #define I915_OVERLAY_UV_SWAP		0x010000
1481*4882a593Smuzhiyun #define I915_OVERLAY_Y_SWAP		0x020000
1482*4882a593Smuzhiyun #define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun #define I915_OVERLAY_FLAGS_MASK		0xff000000
1485*4882a593Smuzhiyun #define I915_OVERLAY_ENABLE		0x01000000
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun struct drm_intel_overlay_put_image {
1488*4882a593Smuzhiyun 	/* various flags and src format description */
1489*4882a593Smuzhiyun 	__u32 flags;
1490*4882a593Smuzhiyun 	/* source picture description */
1491*4882a593Smuzhiyun 	__u32 bo_handle;
1492*4882a593Smuzhiyun 	/* stride values and offsets are in bytes, buffer relative */
1493*4882a593Smuzhiyun 	__u16 stride_Y; /* stride for packed formats */
1494*4882a593Smuzhiyun 	__u16 stride_UV;
1495*4882a593Smuzhiyun 	__u32 offset_Y; /* offset for packed formats */
1496*4882a593Smuzhiyun 	__u32 offset_U;
1497*4882a593Smuzhiyun 	__u32 offset_V;
1498*4882a593Smuzhiyun 	/* in pixels */
1499*4882a593Smuzhiyun 	__u16 src_width;
1500*4882a593Smuzhiyun 	__u16 src_height;
1501*4882a593Smuzhiyun 	/* to compensate the scaling factors for partially covered surfaces */
1502*4882a593Smuzhiyun 	__u16 src_scan_width;
1503*4882a593Smuzhiyun 	__u16 src_scan_height;
1504*4882a593Smuzhiyun 	/* output crtc description */
1505*4882a593Smuzhiyun 	__u32 crtc_id;
1506*4882a593Smuzhiyun 	__u16 dst_x;
1507*4882a593Smuzhiyun 	__u16 dst_y;
1508*4882a593Smuzhiyun 	__u16 dst_width;
1509*4882a593Smuzhiyun 	__u16 dst_height;
1510*4882a593Smuzhiyun };
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun /* flags */
1513*4882a593Smuzhiyun #define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1514*4882a593Smuzhiyun #define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1515*4882a593Smuzhiyun #define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1516*4882a593Smuzhiyun struct drm_intel_overlay_attrs {
1517*4882a593Smuzhiyun 	__u32 flags;
1518*4882a593Smuzhiyun 	__u32 color_key;
1519*4882a593Smuzhiyun 	__s32 brightness;
1520*4882a593Smuzhiyun 	__u32 contrast;
1521*4882a593Smuzhiyun 	__u32 saturation;
1522*4882a593Smuzhiyun 	__u32 gamma0;
1523*4882a593Smuzhiyun 	__u32 gamma1;
1524*4882a593Smuzhiyun 	__u32 gamma2;
1525*4882a593Smuzhiyun 	__u32 gamma3;
1526*4882a593Smuzhiyun 	__u32 gamma4;
1527*4882a593Smuzhiyun 	__u32 gamma5;
1528*4882a593Smuzhiyun };
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun /*
1531*4882a593Smuzhiyun  * Intel sprite handling
1532*4882a593Smuzhiyun  *
1533*4882a593Smuzhiyun  * Color keying works with a min/mask/max tuple.  Both source and destination
1534*4882a593Smuzhiyun  * color keying is allowed.
1535*4882a593Smuzhiyun  *
1536*4882a593Smuzhiyun  * Source keying:
1537*4882a593Smuzhiyun  * Sprite pixels within the min & max values, masked against the color channels
1538*4882a593Smuzhiyun  * specified in the mask field, will be transparent.  All other pixels will
1539*4882a593Smuzhiyun  * be displayed on top of the primary plane.  For RGB surfaces, only the min
1540*4882a593Smuzhiyun  * and mask fields will be used; ranged compares are not allowed.
1541*4882a593Smuzhiyun  *
1542*4882a593Smuzhiyun  * Destination keying:
1543*4882a593Smuzhiyun  * Primary plane pixels that match the min value, masked against the color
1544*4882a593Smuzhiyun  * channels specified in the mask field, will be replaced by corresponding
1545*4882a593Smuzhiyun  * pixels from the sprite plane.
1546*4882a593Smuzhiyun  *
1547*4882a593Smuzhiyun  * Note that source & destination keying are exclusive; only one can be
1548*4882a593Smuzhiyun  * active on a given plane.
1549*4882a593Smuzhiyun  */
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun #define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
1552*4882a593Smuzhiyun 						* flags==0 to disable colorkeying.
1553*4882a593Smuzhiyun 						*/
1554*4882a593Smuzhiyun #define I915_SET_COLORKEY_DESTINATION	(1<<1)
1555*4882a593Smuzhiyun #define I915_SET_COLORKEY_SOURCE	(1<<2)
1556*4882a593Smuzhiyun struct drm_intel_sprite_colorkey {
1557*4882a593Smuzhiyun 	__u32 plane_id;
1558*4882a593Smuzhiyun 	__u32 min_value;
1559*4882a593Smuzhiyun 	__u32 channel_mask;
1560*4882a593Smuzhiyun 	__u32 max_value;
1561*4882a593Smuzhiyun 	__u32 flags;
1562*4882a593Smuzhiyun };
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun struct drm_i915_gem_wait {
1565*4882a593Smuzhiyun 	/** Handle of BO we shall wait on */
1566*4882a593Smuzhiyun 	__u32 bo_handle;
1567*4882a593Smuzhiyun 	__u32 flags;
1568*4882a593Smuzhiyun 	/** Number of nanoseconds to wait. Returns the time remaining. */
1569*4882a593Smuzhiyun 	__s64 timeout_ns;
1570*4882a593Smuzhiyun };
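
/*
 * Example (sketch): waiting up to 1ms for a BO to go idle. A negative
 * timeout is assumed to wait indefinitely; on timeout the ioctl is expected
 * to fail with ETIME.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000, // 1ms
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0) {
 *		// idle; wait.timeout_ns holds the time remaining
 *	}
 */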
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun struct drm_i915_gem_context_create {
1573*4882a593Smuzhiyun 	__u32 ctx_id; /* output: id of new context */
1574*4882a593Smuzhiyun 	__u32 pad;
1575*4882a593Smuzhiyun };
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun struct drm_i915_gem_context_create_ext {
1578*4882a593Smuzhiyun 	__u32 ctx_id; /* output: id of new context */
1579*4882a593Smuzhiyun 	__u32 flags;
1580*4882a593Smuzhiyun #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
1581*4882a593Smuzhiyun #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
1582*4882a593Smuzhiyun #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1583*4882a593Smuzhiyun 	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1584*4882a593Smuzhiyun 	__u64 extensions;
1585*4882a593Smuzhiyun };
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun struct drm_i915_gem_context_param {
1588*4882a593Smuzhiyun 	__u32 ctx_id;
1589*4882a593Smuzhiyun 	__u32 size;
1590*4882a593Smuzhiyun 	__u64 param;
1591*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1592*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1593*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1594*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
1595*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_BANNABLE	0x5
1596*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_PRIORITY	0x6
1597*4882a593Smuzhiyun #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
1598*4882a593Smuzhiyun #define   I915_CONTEXT_DEFAULT_PRIORITY		0
1599*4882a593Smuzhiyun #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
1600*4882a593Smuzhiyun 	/*
1601*4882a593Smuzhiyun 	 * When using the following param, value should be a pointer to
1602*4882a593Smuzhiyun 	 * drm_i915_gem_context_param_sseu.
1603*4882a593Smuzhiyun 	 */
1604*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_SSEU		0x7
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun /*
1607*4882a593Smuzhiyun  * Not all clients may want to attempt automatic recovery of a context after
1608*4882a593Smuzhiyun  * a hang (for example, some clients may only submit very small incremental
1609*4882a593Smuzhiyun  * batches relying on known logical state of previous batches which will never
1610*4882a593Smuzhiyun  * recover correctly and each attempt will hang), and so would prefer that
1611*4882a593Smuzhiyun  * the context is forever banned instead.
1612*4882a593Smuzhiyun  *
1613*4882a593Smuzhiyun  * If set to false (0), after a reset, subsequent (and in flight) rendering
1614*4882a593Smuzhiyun  * from this context is discarded, and the client will need to create a new
1615*4882a593Smuzhiyun  * context to use instead.
1616*4882a593Smuzhiyun  *
1617*4882a593Smuzhiyun  * If set to true (1), the kernel will automatically attempt to recover the
1618*4882a593Smuzhiyun  * context by skipping the hanging batch and executing the next batch starting
1619*4882a593Smuzhiyun  * from the default context state (discarding the incomplete logical context
1620*4882a593Smuzhiyun  * state lost due to the reset).
1621*4882a593Smuzhiyun  *
1622*4882a593Smuzhiyun  * On creation, all new contexts are marked as recoverable.
1623*4882a593Smuzhiyun  */
1624*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_RECOVERABLE	0x8
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	/*
1627*4882a593Smuzhiyun 	 * The id of the associated virtual memory address space (ppGTT) of
1628*4882a593Smuzhiyun 	 * this context. Can be retrieved and passed to another context
1629*4882a593Smuzhiyun 	 * (on the same fd) for both to use the same ppGTT and so share
1630*4882a593Smuzhiyun 	 * address layouts, and avoid reloading the page tables on context
1631*4882a593Smuzhiyun 	 * switches between themselves.
1632*4882a593Smuzhiyun 	 *
1633*4882a593Smuzhiyun 	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
1634*4882a593Smuzhiyun 	 */
1635*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_VM		0x9
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun /*
1638*4882a593Smuzhiyun  * I915_CONTEXT_PARAM_ENGINES:
1639*4882a593Smuzhiyun  *
1640*4882a593Smuzhiyun  * Bind this context to operate on this subset of available engines. Henceforth,
1641*4882a593Smuzhiyun  * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1642*4882a593Smuzhiyun  * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1643*4882a593Smuzhiyun  * and upwards. Slots 0...N are filled in using the specified (class, instance).
1644*4882a593Smuzhiyun  * Use
1645*4882a593Smuzhiyun  *	engine_class: I915_ENGINE_CLASS_INVALID,
1646*4882a593Smuzhiyun  *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1647*4882a593Smuzhiyun  * to specify a gap in the array that can be filled in later, e.g. by a
1648*4882a593Smuzhiyun  * virtual engine used for load balancing.
1649*4882a593Smuzhiyun  *
1650*4882a593Smuzhiyun  * Setting the number of engines bound to the context to 0, by passing a zero
1651*4882a593Smuzhiyun  * sized argument, will revert back to default settings.
1652*4882a593Smuzhiyun  *
1653*4882a593Smuzhiyun  * See struct i915_context_param_engines.
1654*4882a593Smuzhiyun  *
1655*4882a593Smuzhiyun  * Extensions:
1656*4882a593Smuzhiyun  *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1657*4882a593Smuzhiyun  *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1658*4882a593Smuzhiyun  */
1659*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_ENGINES	0xa
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun /*
1662*4882a593Smuzhiyun  * I915_CONTEXT_PARAM_PERSISTENCE:
1663*4882a593Smuzhiyun  *
1664*4882a593Smuzhiyun  * Allow the context and active rendering to survive the process until
1665*4882a593Smuzhiyun  * completion. Persistence allows fire-and-forget clients to queue up a
1666*4882a593Smuzhiyun  * bunch of work, hand the output over to a display server and then quit.
1667*4882a593Smuzhiyun  * If the context is marked as not persistent, upon closing (either via
1668*4882a593Smuzhiyun  * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
1669*4882a593Smuzhiyun  * or process termination), the context and any outstanding requests will be
1670*4882a593Smuzhiyun  * cancelled (and exported fences for cancelled requests marked as -EIO).
1671*4882a593Smuzhiyun  *
1672*4882a593Smuzhiyun  * By default, new contexts allow persistence.
1673*4882a593Smuzhiyun  */
1674*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_PERSISTENCE	0xb
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun /*
1677*4882a593Smuzhiyun  * I915_CONTEXT_PARAM_RINGSIZE:
1678*4882a593Smuzhiyun  *
1679*4882a593Smuzhiyun  * Sets the size of the CS ringbuffer to use for logical ring contexts. This
1680*4882a593Smuzhiyun  * applies a limit on how many batches can be queued to HW before the caller
1681*4882a593Smuzhiyun  * is blocked due to lack of space for more commands.
1682*4882a593Smuzhiyun  *
1683*4882a593Smuzhiyun  * Only reliably possible to be set prior to first use, i.e. during
1684*4882a593Smuzhiyun  * construction. At any later point, the current execution must be flushed as
1685*4882a593Smuzhiyun  * the ring can only be changed while the context is idle. Note, the ringsize
1686*4882a593Smuzhiyun  * can be specified as a constructor property, see
1687*4882a593Smuzhiyun  * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
1688*4882a593Smuzhiyun  *
1689*4882a593Smuzhiyun  * Only applies to the current set of engines and is lost when those engines
1690*4882a593Smuzhiyun  * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
1691*4882a593Smuzhiyun  *
1692*4882a593Smuzhiyun  * Must be between 4 and 512 KiB, in multiples of the page size (4 KiB).
1693*4882a593Smuzhiyun  * Default is 16 KiB.
1694*4882a593Smuzhiyun  */
1695*4882a593Smuzhiyun #define I915_CONTEXT_PARAM_RINGSIZE	0xc
1696*4882a593Smuzhiyun /* Must be kept compact -- no holes and well documented */
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	__u64 value;
1699*4882a593Smuzhiyun };
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun /**
1702*4882a593Smuzhiyun  * Context SSEU programming
1703*4882a593Smuzhiyun  *
1704*4882a593Smuzhiyun  * It may be necessary for either functional or performance reasons to configure
1705*4882a593Smuzhiyun  * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
1706*4882a593Smuzhiyun  * Sub-slice/EU).
1707*4882a593Smuzhiyun  *
1708*4882a593Smuzhiyun  * This is done by configuring SSEU configuration using the below
1709*4882a593Smuzhiyun  * @struct drm_i915_gem_context_param_sseu for every supported engine which
1710*4882a593Smuzhiyun  * userspace intends to use.
1711*4882a593Smuzhiyun  *
1712*4882a593Smuzhiyun  * Not all GPUs or engines support this functionality in which case an error
1713*4882a593Smuzhiyun  * code -ENODEV will be returned.
1714*4882a593Smuzhiyun  *
1715*4882a593Smuzhiyun  * Also, the flexibility of possible SSEU configuration permutations varies
1716*4882a593Smuzhiyun  * between GPU generations and software imposed limitations. Requesting an
1717*4882a593Smuzhiyun  * unsupported combination will return an error code of -EINVAL.
1718*4882a593Smuzhiyun  *
1719*4882a593Smuzhiyun  * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1720*4882a593Smuzhiyun  * favour of a single global setting.
1721*4882a593Smuzhiyun  */
1722*4882a593Smuzhiyun struct drm_i915_gem_context_param_sseu {
1723*4882a593Smuzhiyun 	/*
1724*4882a593Smuzhiyun 	 * Engine class & instance to be configured or queried.
1725*4882a593Smuzhiyun 	 */
1726*4882a593Smuzhiyun 	struct i915_engine_class_instance engine;
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	/*
1729*4882a593Smuzhiyun 	 * Unknown flags must be cleared to zero.
1730*4882a593Smuzhiyun 	 */
1731*4882a593Smuzhiyun 	__u32 flags;
1732*4882a593Smuzhiyun #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	/*
1735*4882a593Smuzhiyun 	 * Mask of slices to enable for the context. Valid values are a subset
1736*4882a593Smuzhiyun 	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1737*4882a593Smuzhiyun 	 */
1738*4882a593Smuzhiyun 	__u64 slice_mask;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	/*
1741*4882a593Smuzhiyun 	 * Mask of subslices to enable for the context. Valid values are a
1742*4882a593Smuzhiyun 	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
1743*4882a593Smuzhiyun 	 */
1744*4882a593Smuzhiyun 	__u64 subslice_mask;
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 	/*
1747*4882a593Smuzhiyun 	 * Minimum/Maximum number of EUs to enable per subslice for the
1748*4882a593Smuzhiyun 	 * context. min_eus_per_subslice must be less than or equal to
1749*4882a593Smuzhiyun 	 * max_eus_per_subslice.
1750*4882a593Smuzhiyun 	 */
1751*4882a593Smuzhiyun 	__u16 min_eus_per_subslice;
1752*4882a593Smuzhiyun 	__u16 max_eus_per_subslice;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	/*
1755*4882a593Smuzhiyun 	 * Unused for now. Must be cleared to zero.
1756*4882a593Smuzhiyun 	 */
1757*4882a593Smuzhiyun 	__u32 rsvd;
1758*4882a593Smuzhiyun };
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun /*
1761*4882a593Smuzhiyun  * i915_context_engines_load_balance:
1762*4882a593Smuzhiyun  *
1763*4882a593Smuzhiyun  * Enable load balancing across this set of engines.
1764*4882a593Smuzhiyun  *
1765*4882a593Smuzhiyun  * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that,
1766*4882a593Smuzhiyun  * when used, will proxy the execbuffer request onto one of the set of engines
1767*4882a593Smuzhiyun  * in such a way as to distribute the load evenly across the set.
1768*4882a593Smuzhiyun  *
1769*4882a593Smuzhiyun  * The set of engines must be compatible (e.g. the same HW class) as they
1770*4882a593Smuzhiyun  * will share the same logical GPU context and ring.
1771*4882a593Smuzhiyun  *
1772*4882a593Smuzhiyun  * To intermix rendering with the virtual engine and direct rendering onto
1773*4882a593Smuzhiyun  * the backing engines (bypassing the load balancing proxy), the context must
1774*4882a593Smuzhiyun  * be defined to use a single timeline for all engines.
1775*4882a593Smuzhiyun  */
1776*4882a593Smuzhiyun struct i915_context_engines_load_balance {
1777*4882a593Smuzhiyun 	struct i915_user_extension base;
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 	__u16 engine_index;
1780*4882a593Smuzhiyun 	__u16 num_siblings;
1781*4882a593Smuzhiyun 	__u32 flags; /* all undefined flags must be zero */
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	__u64 mbz64; /* reserved for future use; must be zero */
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	struct i915_engine_class_instance engines[0];
1786*4882a593Smuzhiyun } __attribute__((packed));
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
1789*4882a593Smuzhiyun 	struct i915_user_extension base; \
1790*4882a593Smuzhiyun 	__u16 engine_index; \
1791*4882a593Smuzhiyun 	__u16 num_siblings; \
1792*4882a593Smuzhiyun 	__u32 flags; \
1793*4882a593Smuzhiyun 	__u64 mbz64; \
1794*4882a593Smuzhiyun 	struct i915_engine_class_instance engines[N__]; \
1795*4882a593Smuzhiyun } __attribute__((packed)) name__
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun /*
1798*4882a593Smuzhiyun  * i915_context_engines_bond:
1799*4882a593Smuzhiyun  *
1800*4882a593Smuzhiyun  * Construct bonded pairs for execution within a virtual engine.
1801*4882a593Smuzhiyun  *
1802*4882a593Smuzhiyun  * All engines are equal, but some are more equal than others. Given
1803*4882a593Smuzhiyun  * the distribution of resources in the HW, it may be preferable to run
1804*4882a593Smuzhiyun  * a request on a given subset of engines in parallel to a request on a
1805*4882a593Smuzhiyun  * specific engine. We enable this selection of engines within a virtual
1806*4882a593Smuzhiyun  * engine by specifying bonding pairs, for any given master engine we will
1807*4882a593Smuzhiyun  * only execute on one of the corresponding siblings within the virtual engine.
1808*4882a593Smuzhiyun  *
1809*4882a593Smuzhiyun  * To execute a request in parallel on the master engine and a sibling requires
1810*4882a593Smuzhiyun  * coordination with a I915_EXEC_FENCE_SUBMIT.
1811*4882a593Smuzhiyun  */
1812*4882a593Smuzhiyun struct i915_context_engines_bond {
1813*4882a593Smuzhiyun 	struct i915_user_extension base;
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	struct i915_engine_class_instance master;
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun 	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
1818*4882a593Smuzhiyun 	__u16 num_bonds;
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	__u64 flags; /* all undefined flags must be zero */
1821*4882a593Smuzhiyun 	__u64 mbz64[4]; /* reserved for future use; must be zero */
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	struct i915_engine_class_instance engines[0];
1824*4882a593Smuzhiyun } __attribute__((packed));
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
1827*4882a593Smuzhiyun 	struct i915_user_extension base; \
1828*4882a593Smuzhiyun 	struct i915_engine_class_instance master; \
1829*4882a593Smuzhiyun 	__u16 virtual_index; \
1830*4882a593Smuzhiyun 	__u16 num_bonds; \
1831*4882a593Smuzhiyun 	__u64 flags; \
1832*4882a593Smuzhiyun 	__u64 mbz64[4]; \
1833*4882a593Smuzhiyun 	struct i915_engine_class_instance engines[N__]; \
1834*4882a593Smuzhiyun } __attribute__((packed)) name__
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun struct i915_context_param_engines {
1837*4882a593Smuzhiyun 	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
1838*4882a593Smuzhiyun #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
1839*4882a593Smuzhiyun #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
1840*4882a593Smuzhiyun 	struct i915_engine_class_instance engines[0];
1841*4882a593Smuzhiyun } __attribute__((packed));
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
1844*4882a593Smuzhiyun 	__u64 extensions; \
1845*4882a593Smuzhiyun 	struct i915_engine_class_instance engines[N__]; \
1846*4882a593Smuzhiyun } __attribute__((packed)) name__
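
/*
 * Illustrative sketch: binding a context to two video engines behind a load
 * balancing virtual engine in slot 0, using the helper macros above. ctx_id
 * is assumed to be an existing context on fd.
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *		.base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *		.num_siblings = 2,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
 *		.extensions = (__u64)(uintptr_t)&balance,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_INVALID,
 *			  I915_ENGINE_CLASS_INVALID_NONE },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (__u64)(uintptr_t)&engines,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */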
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun struct drm_i915_gem_context_create_ext_setparam {
1849*4882a593Smuzhiyun #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1850*4882a593Smuzhiyun 	struct i915_user_extension base;
1851*4882a593Smuzhiyun 	struct drm_i915_gem_context_param param;
1852*4882a593Smuzhiyun };
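
/*
 * Example (sketch): creating a context with an initial parameter via the
 * SETPARAM creation extension.
 *
 *	struct drm_i915_gem_context_create_ext_setparam ext = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = I915_CONTEXT_DEFAULT_PRIORITY,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&ext,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// create.ctx_id holds the id of the new context
 */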
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun struct drm_i915_gem_context_create_ext_clone {
1855*4882a593Smuzhiyun #define I915_CONTEXT_CREATE_EXT_CLONE 1
1856*4882a593Smuzhiyun 	struct i915_user_extension base;
1857*4882a593Smuzhiyun 	__u32 clone_id;
1858*4882a593Smuzhiyun 	__u32 flags;
1859*4882a593Smuzhiyun #define I915_CONTEXT_CLONE_ENGINES	(1u << 0)
1860*4882a593Smuzhiyun #define I915_CONTEXT_CLONE_FLAGS	(1u << 1)
1861*4882a593Smuzhiyun #define I915_CONTEXT_CLONE_SCHEDATTR	(1u << 2)
1862*4882a593Smuzhiyun #define I915_CONTEXT_CLONE_SSEU		(1u << 3)
1863*4882a593Smuzhiyun #define I915_CONTEXT_CLONE_TIMELINE	(1u << 4)
1864*4882a593Smuzhiyun #define I915_CONTEXT_CLONE_VM		(1u << 5)
1865*4882a593Smuzhiyun #define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
1866*4882a593Smuzhiyun 	__u64 rsvd;
1867*4882a593Smuzhiyun };
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun struct drm_i915_gem_context_destroy {
1870*4882a593Smuzhiyun 	__u32 ctx_id;
1871*4882a593Smuzhiyun 	__u32 pad;
1872*4882a593Smuzhiyun };
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun /*
1875*4882a593Smuzhiyun  * DRM_I915_GEM_VM_CREATE -
1876*4882a593Smuzhiyun  *
1877*4882a593Smuzhiyun  * Create a new virtual memory address space (ppGTT) for use within a context
1878*4882a593Smuzhiyun  * on the same file. Extensions can be provided to configure exactly how the
1879*4882a593Smuzhiyun  * address space is setup upon creation.
1880*4882a593Smuzhiyun  *
1881*4882a593Smuzhiyun  * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
1882*4882a593Smuzhiyun  * returned in the outparam @id.
1883*4882a593Smuzhiyun  *
1884*4882a593Smuzhiyun  * No flags are currently defined; all bits are reserved and must be zero.
1885*4882a593Smuzhiyun  *
1886*4882a593Smuzhiyun  * An extension chain may be provided, starting with @extensions, and terminated
1887*4882a593Smuzhiyun  * by the @next_extension being 0. Currently, no extensions are defined.
1888*4882a593Smuzhiyun  *
1889*4882a593Smuzhiyun  * DRM_I915_GEM_VM_DESTROY -
1890*4882a593Smuzhiyun  *
1891*4882a593Smuzhiyun  * Destroys a previously created VM id, specified in @id.
1892*4882a593Smuzhiyun  *
1893*4882a593Smuzhiyun  * No extensions or flags are allowed currently, and so must be zero.
1894*4882a593Smuzhiyun  */
1895*4882a593Smuzhiyun struct drm_i915_gem_vm_control {
1896*4882a593Smuzhiyun 	__u64 extensions;
1897*4882a593Smuzhiyun 	__u32 flags;
1898*4882a593Smuzhiyun 	__u32 vm_id;
1899*4882a593Smuzhiyun };
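
/*
 * Example (sketch): creating a ppGTT and sharing it between two existing
 * contexts (ctx_a, ctx_b) on the same fd via I915_CONTEXT_PARAM_VM.
 *
 *	struct drm_i915_gem_vm_control vm = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *
 *	struct drm_i915_gem_context_param p = {
 *		.param = I915_CONTEXT_PARAM_VM,
 *		.value = vm.vm_id,
 *	};
 *	p.ctx_id = ctx_a;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	p.ctx_id = ctx_b;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */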
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun struct drm_i915_reg_read {
1902*4882a593Smuzhiyun 	/*
1903*4882a593Smuzhiyun 	 * Register offset.
1904*4882a593Smuzhiyun 	 * For 64bit wide registers where the upper 32bits don't immediately
1905*4882a593Smuzhiyun 	 * follow the lower 32bits, the offset of the lower 32bits must
1906*4882a593Smuzhiyun 	 * be specified
1907*4882a593Smuzhiyun 	 * be specified.
1908*4882a593Smuzhiyun 	__u64 offset;
1909*4882a593Smuzhiyun #define I915_REG_READ_8B_WA (1ul << 0)
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	__u64 val; /* Return value */
1912*4882a593Smuzhiyun };
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun /* Known registers:
1915*4882a593Smuzhiyun  *
1916*4882a593Smuzhiyun  * Render engine timestamp - 0x2358 + 64bit - gen7+
1917*4882a593Smuzhiyun  * - Note this register returns an invalid value if using the default
1918*4882a593Smuzhiyun  *   single instruction 8byte read, in order to workaround that pass
1919*4882a593Smuzhiyun  *   flag I915_REG_READ_8B_WA in offset field.
1920*4882a593Smuzhiyun  *
1921*4882a593Smuzhiyun  */
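
/*
 * Example (sketch): reading the gen7+ render engine timestamp with the
 * 8-byte read workaround described above.
 *
 *	struct drm_i915_reg_read reg = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0) {
 *		// reg.val holds the timestamp
 *	}
 */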
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun struct drm_i915_reset_stats {
1924*4882a593Smuzhiyun 	__u32 ctx_id;
1925*4882a593Smuzhiyun 	__u32 flags;
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun 	/* All resets since boot/module reload, for all contexts */
1928*4882a593Smuzhiyun 	__u32 reset_count;
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 	/* Number of batches lost when active in GPU, for this context */
1931*4882a593Smuzhiyun 	__u32 batch_active;
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 	/* Number of batches lost pending for execution, for this context */
1934*4882a593Smuzhiyun 	__u32 batch_pending;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	__u32 pad;
1937*4882a593Smuzhiyun };
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun struct drm_i915_gem_userptr {
1940*4882a593Smuzhiyun 	__u64 user_ptr;
1941*4882a593Smuzhiyun 	__u64 user_size;
1942*4882a593Smuzhiyun 	__u32 flags;
1943*4882a593Smuzhiyun #define I915_USERPTR_READ_ONLY 0x1
1944*4882a593Smuzhiyun #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1945*4882a593Smuzhiyun 	/**
1946*4882a593Smuzhiyun 	 * Returned handle for the object.
1947*4882a593Smuzhiyun 	 *
1948*4882a593Smuzhiyun 	 * Object handles are nonzero.
1949*4882a593Smuzhiyun 	 */
1950*4882a593Smuzhiyun 	__u32 handle;
1951*4882a593Smuzhiyun };
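
/*
 * Example (sketch): wrapping an existing page-aligned allocation in a GEM
 * object. Both user_ptr and user_size are assumed to require page alignment.
 *
 *	void *mem;
 *	struct drm_i915_gem_userptr up = { 0 };
 *
 *	posix_memalign(&mem, 4096, size);
 *	up.user_ptr = (__u64)(uintptr_t)mem;
 *	up.user_size = size;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up) == 0) {
 *		// up.handle is a regular (nonzero) GEM handle for mem
 *	}
 */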
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun enum drm_i915_oa_format {
1954*4882a593Smuzhiyun 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
1955*4882a593Smuzhiyun 	I915_OA_FORMAT_A29,	    /* HSW only */
1956*4882a593Smuzhiyun 	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
1957*4882a593Smuzhiyun 	I915_OA_FORMAT_B4_C8,	    /* HSW only */
1958*4882a593Smuzhiyun 	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
1959*4882a593Smuzhiyun 	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
1960*4882a593Smuzhiyun 	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	/* Gen8+ */
1963*4882a593Smuzhiyun 	I915_OA_FORMAT_A12,
1964*4882a593Smuzhiyun 	I915_OA_FORMAT_A12_B8_C8,
1965*4882a593Smuzhiyun 	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	I915_OA_FORMAT_MAX	    /* non-ABI */
1968*4882a593Smuzhiyun };
1969*4882a593Smuzhiyun 
enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,
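
	/*
	 * Worked example (illustrative): an exponent of 16 selects a
	 * sampling period of 80ns * 2^(16 + 1) = 10,485,760 ns, i.e.
	 * roughly one periodic OA report every 10.5 ms.
	 */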

	/**
	 * Specifying this property is only valid when specifying a context to
	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
	 * will hold preemption of the particular context we want to gather
	 * performance data about. The execbuf2 submissions must include a
	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
	 *
	 * This property is available in perf revision 3.
	 */
	DRM_I915_PERF_PROP_HOLD_PREEMPTION,

	/**
	 * Specifying this pins all contexts to the specified SSEU power
	 * configuration for the duration of the recording.
	 *
	 * This parameter's value is a pointer to a struct
	 * drm_i915_gem_context_param_sseu.
	 *
	 * This property is available in perf revision 4.
	 */
	DRM_I915_PERF_PROP_GLOBAL_SSEU,

	/**
	 * This optional parameter specifies the timer interval in nanoseconds
	 * at which the i915 driver will check the OA buffer for available data.
	 * Minimum allowed value is 100 microseconds. A default value is used by
	 * the driver if this parameter is not specified. Note that larger timer
	 * values will reduce CPU consumption during OA perf captures. However,
	 * excessively large values could result in OA buffer overwrites as
	 * captures reach the end of the OA buffer.
	 *
	 * This property is available in perf revision 5.
	 */
	DRM_I915_PERF_PROP_POLL_OA_PERIOD,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};

struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
#define I915_PERF_FLAG_DISABLED		(1<<2)

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};
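
/*
 * An open sketch (illustrative, not part of the ABI): the properties
 * array is a flat list of (id, value) u64 pairs. The metrics set id used
 * below is a placeholder; real ids are returned by
 * DRM_IOCTL_I915_PERF_ADD_CONFIG or published under sysfs (e.g.
 * /sys/class/drm/card0/metrics/<uuid>/id). "fd" is an open i915 DRM
 * device; on success the ioctl returns a new stream file descriptor.
 *
 *	__u64 metrics_set_id = 1; // placeholder, see above
 *	__u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA,       1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET,  metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT,
 *			I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT,     16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *			 I915_PERF_FLAG_DISABLED,
 *		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */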

/**
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)

/**
 * Disable data capture for a stream.
 *
 * It is an error to try to read a stream that is disabled.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
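
/*
 * Usage sketch (illustrative): a stream opened with
 * I915_PERF_FLAG_DISABLED can be toggled around just the workload of
 * interest ("stream_fd" as returned by DRM_IOCTL_I915_PERF_OPEN):
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	// ... submit and run the workload being profiled ...
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */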

/**
 * Change the metrics_set captured by a stream.
 *
 * If the stream is bound to a specific context, the configuration change
 * will be performed inline with that context such that it takes effect before
 * the next execbuf submission.
 *
 * Returns the previously bound metrics set id, or a negative error code.
 *
 * This ioctl is available in perf revision 2.
 */
#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)

/**
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;
	__u16 pad;
	__u16 size;
};

enum drm_i915_perf_record_type {

	/**
	 * Samples are the workhorse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here.
	 *
	 * struct {
	 *     struct drm_i915_perf_record_header header;
	 *
	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 * };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
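
/*
 * A read-loop sketch (illustrative, not part of the ABI): records are
 * consumed from the stream fd with read(2). Each record begins with a
 * drm_i915_perf_record_header whose size field covers the header plus
 * the record payload, so it can be used to walk a buffer of
 * back-to-back records ("process_oa_report" is a hypothetical helper):
 *
 *	char buf[64 * 1024];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	const char *p = buf, *end = buf + (len > 0 ? len : 0);
 *
 *	while (p + sizeof(struct drm_i915_perf_record_header) <= end) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)p;
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(hdr + 1);
 *		p += hdr->size;
 *	}
 */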

/**
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	/*
	 * These fields are pointers to tuples of u32 values (register address,
	 * value). For example the expected length of the buffer pointed to by
	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};
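
/*
 * An upload sketch (illustrative, not part of the ABI), using
 * DRM_IOCTL_I915_PERF_ADD_CONFIG defined earlier in this header. The
 * UUID and the (address, value) pair below are placeholders; real
 * register lists are hardware specific:
 *
 *	__u32 mux_regs[] = { 0x9888, 0x10800000 }; // one (address, value)
 *	struct drm_i915_perf_oa_config cfg = {};
 *	int metrics_set_id;
 *
 *	memcpy(cfg.uuid, "01234567-0123-0123-0123-0123456789ab",
 *	       sizeof(cfg.uuid));
 *	cfg.n_mux_regs = 1;
 *	cfg.mux_regs_ptr = (__u64)(uintptr_t)mux_regs;
 *
 *	// On success returns the new metrics set id, usable with
 *	// DRM_I915_PERF_PROP_OA_METRICS_SET.
 *	metrics_set_id = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
 */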

struct drm_i915_query_item {
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO    1
#define DRM_I915_QUERY_ENGINE_INFO	2
#define DRM_I915_QUERY_PERF_CONFIG      3
/* Must be kept compact -- no holes and well documented */

	/*
	 * When set to zero by userspace, this is filled with the size of the
	 * data to be written at the data_ptr pointer. The kernel sets this
	 * value to a negative value to signal an error on a particular query
	 * item.
	 */
	__s32 length;

	/*
	 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
	 *
	 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
	 * following:
	 *         - DRM_I915_QUERY_PERF_CONFIG_LIST
	 *         - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
	 *         - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
	 */
	__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3

	/*
	 * Data will be written at the location pointed to by data_ptr when the
	 * value of length matches the length of the data to be written by the
	 * kernel.
	 */
	__u64 data_ptr;
};

struct drm_i915_query {
	__u32 num_items;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * This points to an array of num_items drm_i915_query_item structures.
	 */
	__u64 items_ptr;
};
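
/*
 * A two-step query sketch (illustrative, not part of the ABI): call
 * DRM_IOCTL_I915_QUERY once with item.length == 0 so the kernel reports
 * the required size, allocate that much, then call again to have the
 * data written out:
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (__u64)(uintptr_t)&item,
 *	};
 *	struct drm_i915_query_topology_info *info;
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // fills item.length
 *	info = calloc(1, item.length);
 *	item.data_ptr = (__u64)(uintptr_t)info;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // writes into info
 */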

/*
 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
 *
 * data: contains the following 3 pieces of information:
 *
 * - the slice mask with one bit per slice telling whether a slice is
 *   available. The availability of slice X can be queried with the following
 *   formula:
 *
 *           (data[X / 8] >> (X % 8)) & 1
 *
 * - the subslice mask for each slice with one bit per subslice telling
 *   whether a subslice is available. Gen12 has dual-subslices, which are
 *   similar to two gen11 subslices. For gen12, this array represents dual-
 *   subslices. The availability of subslice Y in slice X can be queried
 *   with the following formula:
 *
 *           (data[subslice_offset +
 *                 X * subslice_stride +
 *                 Y / 8] >> (Y % 8)) & 1
 *
 * - the EU mask for each subslice in each slice with one bit per EU telling
 *   whether an EU is available. The availability of EU Z in subslice Y in
 *   slice X can be queried with the following formula:
 *
 *           (data[eu_offset +
 *                 (X * max_subslices + Y) * eu_stride +
 *                 Z / 8] >> (Z % 8)) & 1
 */
struct drm_i915_query_topology_info {
	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u16 flags;

	__u16 max_slices;
	__u16 max_subslices;
	__u16 max_eus_per_subslice;

	/*
	 * Offset in data[] at which the subslice masks are stored.
	 */
	__u16 subslice_offset;

	/*
	 * Stride at which each of the subslice masks for each slice are
	 * stored.
	 */
	__u16 subslice_stride;

	/*
	 * Offset in data[] at which the EU masks are stored.
	 */
	__u16 eu_offset;

	/*
	 * Stride at which each of the EU masks for each subslice are stored.
	 */
	__u16 eu_stride;

	__u8 data[];
};
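
/*
 * Helper sketch (illustrative, not part of the ABI) implementing the
 * three availability formulas documented above against a filled-in
 * topology info struct:
 *
 *	static inline int
 *	slice_available(const struct drm_i915_query_topology_info *i, int x)
 *	{
 *		return (i->data[x / 8] >> (x % 8)) & 1;
 *	}
 *
 *	static inline int
 *	subslice_available(const struct drm_i915_query_topology_info *i,
 *			   int x, int y)
 *	{
 *		return (i->data[i->subslice_offset + x * i->subslice_stride +
 *				y / 8] >> (y % 8)) & 1;
 *	}
 *
 *	static inline int
 *	eu_available(const struct drm_i915_query_topology_info *i,
 *		     int x, int y, int z)
 *	{
 *		return (i->data[i->eu_offset +
 *				(x * i->max_subslices + y) * i->eu_stride +
 *				z / 8] >> (z % 8)) & 1;
 *	}
 */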

/**
 * struct drm_i915_engine_info
 *
 * Describes one engine and its capabilities as known to the driver.
 */
struct drm_i915_engine_info {
	/** Engine class and instance. */
	struct i915_engine_class_instance engine;

	/** Reserved field. */
	__u32 rsvd0;

	/** Engine flags. */
	__u64 flags;

	/** Capabilities of this engine. */
	__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)

	/** Reserved fields. */
	__u64 rsvd1[4];
};

/**
 * struct drm_i915_query_engine_info
 *
 * Engine info query enumerates all engines known to the driver by filling in
 * an array of struct drm_i915_engine_info structures.
 */
struct drm_i915_query_engine_info {
	/** Number of struct drm_i915_engine_info structs following. */
	__u32 num_engines;

	/** MBZ */
	__u32 rsvd[3];

	/** Marker for drm_i915_engine_info structures. */
	struct drm_i915_engine_info engines[];
};
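
/*
 * An enumeration sketch (illustrative, not part of the ABI), reusing
 * the two-step DRM_IOCTL_I915_QUERY pattern shown above with
 * query_id == DRM_I915_QUERY_ENGINE_INFO ("einfo" points to a buffer
 * sized and filled via that two-step call):
 *
 *	struct drm_i915_query_engine_info *einfo = ...;
 *	__u32 n;
 *
 *	for (n = 0; n < einfo->num_engines; n++)
 *		printf("engine %u: class %u, instance %u\n", n,
 *		       einfo->engines[n].engine.engine_class,
 *		       einfo->engines[n].engine.engine_instance);
 */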

/*
 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
 */
struct drm_i915_query_perf_config {
	union {
		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
		 * this field to the number of configurations available.
		 */
		__u64 n_configs;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into config_ptr.
		 */
		__u64 config;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into config_ptr.
		 *
		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
		 */
		char uuid[36];
	};

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
	 * write an array of __u64 of configuration identifiers.
	 *
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
	 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
	 * drm_i915_perf_oa_config. If the following fields of
	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
	 * associated pointers the values submitted when the configuration was
	 * created:
	 *
	 *         - n_mux_regs
	 *         - n_boolean_regs
	 *         - n_flex_regs
	 */
	__u8 data[];
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_I915_DRM_H_ */