/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_DRM_H__
#define __ETNAVIV_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints:
 *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
 *     user/kernel compatibility
 *  2) Keep fields aligned to their size
 *  3) Because of how drm_ioctl() works, we can add new fields at
 *     the end of an ioctl if some care is taken: drm_ioctl() will
 *     zero out the new fields at the tail of the ioctl, so a zero
 *     value should have a backwards compatible meaning.  And for
 *     output params, userspace won't see the newly added output
 *     fields, so that has to be somehow OK.
 */

/* timeouts are specified in clock-monotonic absolute times (to simplify
 * restarting interrupted ioctls).  The following struct is logically the
 * same as 'struct timespec' but 32/64b ABI safe.
 */
struct drm_etnaviv_timespec {
	__s64 tv_sec;          /* seconds */
	__s64 tv_nsec;         /* nanoseconds */
};

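/* Illustrative sketch (not part of the UAPI): user space is expected to
 * convert a relative timeout into an absolute CLOCK_MONOTONIC time before
 * passing it in.  A minimal helper, assuming <time.h> and <stdint.h> are
 * available and the relative timeout is given in nanoseconds, could look
 * like this (the helper name is made up for this example; user space
 * libraries typically provide their own equivalent):
 *
 *	static void etna_abs_timeout(struct drm_etnaviv_timespec *tv,
 *				     uint64_t ns)
 *	{
 *		struct timespec now;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &now);
 *		tv->tv_sec  = now.tv_sec + ns / 1000000000;
 *		tv->tv_nsec = now.tv_nsec + ns % 1000000000;
 *		if (tv->tv_nsec >= 1000000000) {
 *			tv->tv_nsec -= 1000000000;
 *			tv->tv_sec++;
 *		}
 *	}
 */
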
#define ETNAVIV_PARAM_GPU_MODEL                     0x01
#define ETNAVIV_PARAM_GPU_REVISION                  0x02
#define ETNAVIV_PARAM_GPU_FEATURES_0                0x03
#define ETNAVIV_PARAM_GPU_FEATURES_1                0x04
#define ETNAVIV_PARAM_GPU_FEATURES_2                0x05
#define ETNAVIV_PARAM_GPU_FEATURES_3                0x06
#define ETNAVIV_PARAM_GPU_FEATURES_4                0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5                0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6                0x09
#define ETNAVIV_PARAM_GPU_FEATURES_7                0x0a
#define ETNAVIV_PARAM_GPU_FEATURES_8                0x0b
#define ETNAVIV_PARAM_GPU_FEATURES_9                0x0c
#define ETNAVIV_PARAM_GPU_FEATURES_10               0x0d
#define ETNAVIV_PARAM_GPU_FEATURES_11               0x0e
#define ETNAVIV_PARAM_GPU_FEATURES_12               0x0f

#define ETNAVIV_PARAM_GPU_STREAM_COUNT              0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX              0x11
#define ETNAVIV_PARAM_GPU_THREAD_COUNT              0x12
#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE         0x13
#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT         0x14
#define ETNAVIV_PARAM_GPU_PIXEL_PIPES               0x15
#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
#define ETNAVIV_PARAM_GPU_BUFFER_SIZE               0x17
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT         0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS             0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS              0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR            0x1b

#define ETNA_MAX_PIPES 4

struct drm_etnaviv_param {
	__u32 pipe;           /* in */
	__u32 param;          /* in, ETNAVIV_PARAM_x */
	__u64 value;          /* out (get_param) or in (set_param) */
};

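/* Illustrative sketch (not part of the UAPI): parameters are queried via
 * DRM_IOCTL_ETNAVIV_GET_PARAM, defined at the end of this header.
 * Assuming 'fd' is an open etnaviv render node and <sys/ioctl.h> plus
 * <stdio.h> are included, reading the GPU model of pipe 0 could look like
 * this (error handling and EINTR restart, e.g. via libdrm's drmIoctl(),
 * are omitted for brevity):
 *
 *	struct drm_etnaviv_param req = {
 *		.pipe  = 0,
 *		.param = ETNAVIV_PARAM_GPU_MODEL,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req) == 0)
 *		printf("GPU model: 0x%llx\n", (unsigned long long)req.value);
 */
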
/*
 * GEM buffers:
 */

#define ETNA_BO_CACHE_MASK   0x000f0000
/* cache modes */
#define ETNA_BO_CACHED       0x00010000
#define ETNA_BO_WC           0x00020000
#define ETNA_BO_UNCACHED     0x00040000
/* map flags */
#define ETNA_BO_FORCE_MMU    0x00100000

struct drm_etnaviv_gem_new {
	__u64 size;           /* in */
	__u32 flags;          /* in, mask of ETNA_BO_x */
	__u32 handle;         /* out */
};

struct drm_etnaviv_gem_info {
	__u32 handle;         /* in */
	__u32 pad;
	__u64 offset;         /* out, offset to pass to mmap() */
};

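/* Illustrative sketch (not part of the UAPI): allocating a buffer and
 * mapping it into the process.  GEM_NEW returns a handle, GEM_INFO returns
 * the fake offset that is passed to mmap() on the same DRM fd.  Assumes
 * <sys/ioctl.h> and <sys/mman.h>; error handling is omitted:
 *
 *	struct drm_etnaviv_gem_new new_req = {
 *		.size  = 4096,
 *		.flags = ETNA_BO_WC,
 *	};
 *	struct drm_etnaviv_gem_info info = { 0 };
 *	void *map;
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &new_req);
 *	info.handle = new_req.handle;
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &info);
 *	map = mmap(NULL, new_req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, info.offset);
 */
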
#define ETNA_PREP_READ        0x01
#define ETNA_PREP_WRITE       0x02
#define ETNA_PREP_NOSYNC      0x04

struct drm_etnaviv_gem_cpu_prep {
	__u32 handle;         /* in */
	__u32 op;             /* in, mask of ETNA_PREP_x */
	struct drm_etnaviv_timespec timeout;   /* in */
};

struct drm_etnaviv_gem_cpu_fini {
	__u32 handle;         /* in */
	__u32 flags;          /* in, placeholder for now, no defined values */
};

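/* Illustrative sketch (not part of the UAPI): bracketing CPU access to a
 * buffer.  CPU_PREP waits, up to the absolute timeout, until the buffer is
 * safe to access for the requested operations; CPU_FINI ends the CPU
 * access.  'bo_handle' and the etna_abs_timeout() helper sketched above
 * are assumptions of this example:
 *
 *	struct drm_etnaviv_gem_cpu_prep prep = {
 *		.handle = bo_handle,
 *		.op     = ETNA_PREP_READ | ETNA_PREP_WRITE,
 *	};
 *	struct drm_etnaviv_gem_cpu_fini fini = {
 *		.handle = bo_handle,
 *	};
 *
 *	etna_abs_timeout(&prep.timeout, 1000000000ull);
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep);
 *
 * ... CPU reads/writes through the mmap()ed pointer happen here ...
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
 */
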
/*
 * Cmdstream Submission:
 */

/* The value written into the cmdstream is logically:
 * relocbuf->gpuaddr + reloc_offset
 *
 * NOTE that reloc's must be sorted by order of increasing submit_offset,
 * otherwise EINVAL.
 */
struct drm_etnaviv_gem_submit_reloc {
	__u32 submit_offset;  /* in, offset from submit_bo */
	__u32 reloc_idx;      /* in, index of reloc_bo buffer */
	__u64 reloc_offset;   /* in, offset from start of reloc_bo */
	__u32 flags;          /* in, placeholder for now, no defined values */
};

/* Each buffer referenced elsewhere in the cmdstream submit (i.e. the
 * cmdstream buffer(s) themselves or reloc entries) has one (and only
 * one) entry in the submit->bos[] table.
 *
 * As an optimization, the current buffer (gpu virtual address) can be
 * passed back through the 'presumed' field.  If on a subsequent reloc,
 * userspace passes back a 'presumed' address that is still valid,
 * then patching the cmdstream for this entry is skipped.  This can
 * avoid the kernel needing to map/access the cmdstream bo in the common
 * case.
 * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
 * field is interpreted as the fixed location to map the bo into the gpu
 * virtual address space. If the kernel is unable to map the buffer at
 * this location the submit will fail. This means userspace is responsible
 * for the whole gpu virtual address management.
 */
#define ETNA_SUBMIT_BO_READ             0x0001
#define ETNA_SUBMIT_BO_WRITE            0x0002
struct drm_etnaviv_gem_submit_bo {
	__u32 flags;          /* in, mask of ETNA_SUBMIT_BO_x */
	__u32 handle;         /* in, GEM handle */
	__u64 presumed;       /* in/out, presumed buffer address */
};

/* performance monitor request (pmr) */
#define ETNA_PM_PROCESS_PRE             0x0001
#define ETNA_PM_PROCESS_POST            0x0002
struct drm_etnaviv_gem_submit_pmr {
	__u32 flags;          /* in, when to process request (ETNA_PM_PROCESS_x) */
	__u8  domain;         /* in, pm domain */
	__u8  pad;
	__u16 signal;         /* in, pm signal */
	__u32 sequence;       /* in, sequence number */
	__u32 read_offset;    /* in, offset from read_bo */
	__u32 read_idx;       /* in, index of read_bo buffer */
};

/* Each cmdstream submit consists of a table of buffers involved, and
 * one or more cmdstream buffers.  This allows for conditional execution
 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
 */
#define ETNA_SUBMIT_NO_IMPLICIT         0x0001
#define ETNA_SUBMIT_FENCE_FD_IN         0x0002
#define ETNA_SUBMIT_FENCE_FD_OUT        0x0004
#define ETNA_SUBMIT_SOFTPIN             0x0008
#define ETNA_SUBMIT_FLAGS		(ETNA_SUBMIT_NO_IMPLICIT | \
					 ETNA_SUBMIT_FENCE_FD_IN | \
					 ETNA_SUBMIT_FENCE_FD_OUT | \
					 ETNA_SUBMIT_SOFTPIN)
#define ETNA_PIPE_3D      0x00
#define ETNA_PIPE_2D      0x01
#define ETNA_PIPE_VG      0x02
struct drm_etnaviv_gem_submit {
	__u32 fence;          /* out */
	__u32 pipe;           /* in */
	__u32 exec_state;     /* in, initial execution state (ETNA_PIPE_x) */
	__u32 nr_bos;         /* in, number of submit_bo's */
	__u32 nr_relocs;      /* in, number of submit_reloc's */
	__u32 stream_size;    /* in, cmdstream size */
	__u64 bos;            /* in, ptr to array of submit_bo's */
	__u64 relocs;         /* in, ptr to array of submit_reloc's */
	__u64 stream;         /* in, ptr to cmdstream */
	__u32 flags;          /* in, mask of ETNA_SUBMIT_x */
	__s32 fence_fd;       /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
	__u64 pmrs;           /* in, ptr to array of submit_pmr's */
	__u32 nr_pmrs;        /* in, number of submit_pmr's */
	__u32 pad;
};

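/* Illustrative sketch (not part of the UAPI): a minimal submit referencing
 * a single buffer, with no relocs and no perfmon requests.  'bo_handle',
 * 'stream' (a user pointer to the command words) and 'stream_size' (in
 * bytes) are assumptions of this example; <stdint.h> provides uintptr_t:
 *
 *	struct drm_etnaviv_gem_submit_bo bos[] = {
 *		{ .flags = ETNA_SUBMIT_BO_READ, .handle = bo_handle },
 *	};
 *	struct drm_etnaviv_gem_submit req = {
 *		.pipe        = 0,
 *		.exec_state  = ETNA_PIPE_3D,
 *		.nr_bos      = 1,
 *		.bos         = (uintptr_t)bos,
 *		.stream_size = stream_size,
 *		.stream      = (uintptr_t)stream,
 *		.fence_fd    = -1,
 *	};
 *	__u32 last_fence = 0;
 *
 *	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &req) == 0)
 *		last_fence = req.fence;
 *
 * The returned 'fence' can later be passed to DRM_IOCTL_ETNAVIV_WAIT_FENCE
 * for the same pipe (see below).
 */
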
/* The normal way to synchronize with the GPU is just to CPU_PREP on
 * a buffer if you need to access it from the CPU (other cmdstream
 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
 * handle the required synchronization under the hood).  This ioctl
 * mainly just exists as a way to implement the gallium pipe_fence
 * APIs without requiring a dummy bo to synchronize on.
 */
#define ETNA_WAIT_NONBLOCK      0x01
struct drm_etnaviv_wait_fence {
	__u32 pipe;           /* in */
	__u32 fence;          /* in */
	__u32 flags;          /* in, mask of ETNA_WAIT_x */
	__u32 pad;
	struct drm_etnaviv_timespec timeout;   /* in */
};

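/* Illustrative sketch (not part of the UAPI): waiting for the fence number
 * returned by a previous GEM_SUBMIT on the same pipe, reusing the
 * etna_abs_timeout() helper sketched above ('last_fence' is an assumption
 * of this example).  Passing ETNA_WAIT_NONBLOCK in 'flags' requests a
 * non-blocking check instead of waiting:
 *
 *	struct drm_etnaviv_wait_fence wait = {
 *		.pipe  = 0,
 *		.fence = last_fence,
 *	};
 *
 *	etna_abs_timeout(&wait.timeout, 1000000000ull);
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_WAIT_FENCE, &wait);
 */
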
#define ETNA_USERPTR_READ	0x01
#define ETNA_USERPTR_WRITE	0x02
struct drm_etnaviv_gem_userptr {
	__u64 user_ptr;		/* in, page aligned user pointer */
	__u64 user_size;	/* in, page aligned user size */
	__u32 flags;		/* in, flags */
	__u32 handle;		/* out, non-zero handle */
};

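/* Illustrative sketch (not part of the UAPI): wrapping anonymous memory in
 * a GEM object.  'buf' must be page aligned and 'buf_size' a multiple of
 * the page size; both are assumptions of this example:
 *
 *	struct drm_etnaviv_gem_userptr req = {
 *		.user_ptr  = (uintptr_t)buf,
 *		.user_size = buf_size,
 *		.flags     = ETNA_USERPTR_READ | ETNA_USERPTR_WRITE,
 *	};
 *	__u32 handle = 0;
 *
 *	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_USERPTR, &req) == 0)
 *		handle = req.handle;
 */
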
struct drm_etnaviv_gem_wait {
	__u32 pipe;				/* in */
	__u32 handle;				/* in, bo to be waited for */
	__u32 flags;				/* in, mask of ETNA_WAIT_x */
	__u32 pad;
	struct drm_etnaviv_timespec timeout;	/* in */
};

/*
 * Performance Monitor (PM):
 */

struct drm_etnaviv_pm_domain {
	__u32 pipe;       /* in */
	__u8  iter;       /* in/out, select pm domain at index iter */
	__u8  id;         /* out, id of domain */
	__u16 nr_signals; /* out, how many signals does this domain provide */
	char  name[64];   /* out, name of domain */
};

struct drm_etnaviv_pm_signal {
	__u32 pipe;       /* in */
	__u8  domain;     /* in, pm domain index */
	__u8  pad;
	__u16 iter;       /* in/out, select pm source at index iter */
	__u16 id;         /* out, id of signal */
	char  name[64];   /* out, name of signal */
};

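/* Illustrative sketch (not part of the UAPI): querying the first perfmon
 * domain of pipe 0.  The 'iter' field is in/out, so the kernel advances it
 * on success and the call can be repeated to walk the remaining domains;
 * PM_QUERY_SIG works analogously for the signals within a domain.  Assumes
 * <sys/ioctl.h> and <stdio.h>:
 *
 *	struct drm_etnaviv_pm_domain dom = {
 *		.pipe = 0,
 *		.iter = 0,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ETNAVIV_PM_QUERY_DOM, &dom) == 0)
 *		printf("domain %u: %s (%u signals)\n",
 *		       dom.id, dom.name, dom.nr_signals);
 */
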
#define DRM_ETNAVIV_GET_PARAM          0x00
/* placeholder:
#define DRM_ETNAVIV_SET_PARAM          0x01
 */
#define DRM_ETNAVIV_GEM_NEW            0x02
#define DRM_ETNAVIV_GEM_INFO           0x03
#define DRM_ETNAVIV_GEM_CPU_PREP       0x04
#define DRM_ETNAVIV_GEM_CPU_FINI       0x05
#define DRM_ETNAVIV_GEM_SUBMIT         0x06
#define DRM_ETNAVIV_WAIT_FENCE         0x07
#define DRM_ETNAVIV_GEM_USERPTR        0x08
#define DRM_ETNAVIV_GEM_WAIT           0x09
#define DRM_ETNAVIV_PM_QUERY_DOM       0x0a
#define DRM_ETNAVIV_PM_QUERY_SIG       0x0b
#define DRM_ETNAVIV_NUM_IOCTLS         0x0c

#define DRM_IOCTL_ETNAVIV_GET_PARAM    DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
#define DRM_IOCTL_ETNAVIV_GEM_NEW      DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
#define DRM_IOCTL_ETNAVIV_GEM_INFO     DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
#define DRM_IOCTL_ETNAVIV_WAIT_FENCE   DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR  DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
#define DRM_IOCTL_ETNAVIV_GEM_WAIT     DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)

#if defined(__cplusplus)
}
#endif

#endif /* __ETNAVIV_DRM_H__ */