/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _UAPI_VC4_DRM_H_
#define _UAPI_VC4_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VC4_SUBMIT_CL                 0x00
#define DRM_VC4_WAIT_SEQNO                0x01
#define DRM_VC4_WAIT_BO                   0x02
#define DRM_VC4_CREATE_BO                 0x03
#define DRM_VC4_MMAP_BO                   0x04
#define DRM_VC4_CREATE_SHADER_BO          0x05
#define DRM_VC4_GET_HANG_STATE            0x06
#define DRM_VC4_GET_PARAM                 0x07
#define DRM_VC4_SET_TILING                0x08
#define DRM_VC4_GET_TILING                0x09
#define DRM_VC4_LABEL_BO                  0x0a
#define DRM_VC4_GEM_MADVISE               0x0b
#define DRM_VC4_PERFMON_CREATE            0x0c
#define DRM_VC4_PERFMON_DESTROY           0x0d
#define DRM_VC4_PERFMON_GET_VALUES        0x0e

#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
#define DRM_IOCTL_VC4_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO            DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
#define DRM_IOCTL_VC4_GEM_MADVISE         DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
#define DRM_IOCTL_VC4_PERFMON_CREATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
#define DRM_IOCTL_VC4_PERFMON_DESTROY     DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
#define DRM_IOCTL_VC4_PERFMON_GET_VALUES  DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)

struct drm_vc4_submit_rcl_surface {
        __u32 hindex; /* Handle index, or ~0 if not present. */
        __u32 offset; /* Offset to start of buffer. */
        /*
         * Bits for either render config (color_write) or load/store packet.
         * Bits should all be 0 for MSAA load/stores.
         */
        __u16 bits;

#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES         (1 << 0)
        __u16 flags;
};

/**
 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * Drivers typically use GPU BOs to store batchbuffers / command lists and
 * their associated state.  However, because the VC4 lacks an MMU, we have to
 * do validation of memory accesses by the GPU commands.  If we were to store
 * our commands in BOs, we'd need to do uncached readback from them to do the
 * validation process, which is too expensive.  Instead, userspace accumulates
 * commands and associated state in plain memory, then the kernel copies the
 * data to its own address space, and then validates and stores it in a GPU
 * BO.
 */
struct drm_vc4_submit_cl {
        /* Pointer to the binner command list.
         *
         * This is the first set of commands executed, which runs the
         * coordinate shader to determine where primitives land on the screen,
         * then writes out the state updates and draw calls necessary per tile
         * to the tile allocation BO.
         */
        __u64 bin_cl;

        /* Pointer to the shader records.
         *
         * Shader records are the structures read by the hardware that contain
         * pointers to uniforms, shaders, and vertex attributes.
         * The reference to the shader record has enough information to
         * determine how many pointers are necessary (fixed number for
         * shaders/uniforms, and an attribute count), so those BO indices
         * into bo_handles are just stored as __u32s before each shader
         * record passed in.
         */
        __u64 shader_rec;

        /* Pointer to uniform data and texture handles for the textures
         * referenced by the shader.
         *
         * For each shader state record, there is a set of uniform data in the
         * order referenced by the record (FS, VS, then CS).  Each set of
         * uniform data has a __u32 index into bo_handles per texture
         * sample operation, in the order the QPU_W_TMUn_S writes appear in
         * the program.  Following the texture BO handle indices is the actual
         * uniform data.
         *
         * The individual uniform state blocks don't have sizes passed in,
         * because the kernel has to determine the sizes anyway during shader
         * code validation.
         */
        __u64 uniforms;
        __u64 bo_handles;

        /* Size in bytes of the binner command list. */
        __u32 bin_cl_size;
        /* Size in bytes of the set of shader records. */
        __u32 shader_rec_size;
        /* Number of shader records.
         *
         * This could just be computed from the contents of shader_records and
         * the address bits of references to them from the bin CL, but it
         * keeps the kernel from having to resize some allocations it makes.
         */
        __u32 shader_rec_count;
        /* Size in bytes of the uniform state. */
        __u32 uniforms_size;

        /* Number of BO handles passed in (size is that times 4). */
        __u32 bo_handle_count;

        /* RCL setup: */
        __u16 width;
        __u16 height;
        __u8 min_x_tile;
        __u8 min_y_tile;
        __u8 max_x_tile;
        __u8 max_y_tile;
        struct drm_vc4_submit_rcl_surface color_read;
        struct drm_vc4_submit_rcl_surface color_write;
        struct drm_vc4_submit_rcl_surface zs_read;
        struct drm_vc4_submit_rcl_surface zs_write;
        struct drm_vc4_submit_rcl_surface msaa_color_write;
        struct drm_vc4_submit_rcl_surface msaa_zs_write;
        __u32 clear_color[2];
        __u32 clear_z;
        __u8 clear_s;

        __u32 pad:24;

#define VC4_SUBMIT_CL_USE_CLEAR_COLOR                   (1 << 0)
        /* By default, the kernel gets to choose the order that the tiles are
         * rendered in.  If this is set, then the tiles will be rendered in a
         * raster order, with the right-to-left vs left-to-right and
         * top-to-bottom vs bottom-to-top dictated by
         * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*.  This allows overlapping
         * blits to be implemented using the 3D engine.
         */
#define VC4_SUBMIT_CL_FIXED_RCL_ORDER                   (1 << 1)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X            (1 << 2)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y            (1 << 3)
        __u32 flags;

        /* Returned value of the seqno of this render job (for the
         * wait ioctl).
         */
        __u64 seqno;

        /* ID of the perfmon to attach to this job. 0 means no perfmon. */
        __u32 perfmonid;

        /* Syncobj handle to wait on. If set, processing of this render job
         * will not start until the syncobj is signaled. 0 means ignore.
         */
        __u32 in_sync;

        /* Syncobj handle to export fence to. If set, the fence in the syncobj
         * will be replaced with a fence that signals upon completion of this
         * render job. 0 means ignore.
         */
        __u32 out_sync;

        __u32 pad2;
};
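/* Illustrative sketch (not part of the UAPI): one plausible way for userspace
 * to submit a job and then block on its completion, using libdrm's drmIoctl()
 * wrapper (a plain ioctl() call works as well).  It assumes `fd` is an open
 * vc4 DRM fd and that the bin CL, shader records, uniforms and BO handle
 * table have already been built in plain memory as described above; the
 * width/height, tile bounds and RCL surface fields are elided and error
 * handling is omitted.
 *
 *      struct drm_vc4_submit_cl submit = {
 *              .bin_cl = (uintptr_t)bin_cl,
 *              .bin_cl_size = bin_cl_size,
 *              .shader_rec = (uintptr_t)shader_rec,
 *              .shader_rec_size = shader_rec_size,
 *              .shader_rec_count = shader_rec_count,
 *              .uniforms = (uintptr_t)uniforms,
 *              .uniforms_size = uniforms_size,
 *              .bo_handles = (uintptr_t)bo_handles,
 *              .bo_handle_count = bo_handle_count,
 *      };
 *      struct drm_vc4_wait_seqno wait = { .timeout_ns = ~0ull };
 *
 *      drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *      wait.seqno = submit.seqno;
 *      drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 */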
/**
 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
 *
 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
 * block, just return the status."
 */
struct drm_vc4_wait_seqno {
        __u64 seqno;
        __u64 timeout_ns;
};

/**
 * struct drm_vc4_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_vc4_wait_bo {
        __u32 handle;
        __u32 pad;
        __u64 timeout_ns;
};

/**
 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_create_bo {
        __u32 size;
        __u32 flags;
        /** Returned GEM handle for the BO. */
        __u32 handle;
        __u32 pad;
};

/**
 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
 *
 * This doesn't actually perform an mmap.  Instead, it returns the
 * offset you need to use in an mmap on the DRM device node.  This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_mmap_bo {
        /** Handle for the object being mapped. */
        __u32 handle;
        __u32 flags;
        /** offset into the drm node to use for subsequent mmap call. */
        __u64 offset;
};
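/* Illustrative sketch (not part of the UAPI): a minimal create-and-map
 * sequence, assuming `fd` is an open vc4 DRM fd.  DRM_IOCTL_VC4_MMAP_BO only
 * hands back a fake offset; the CPU mapping itself is made with an ordinary
 * mmap() on the same fd.  Error handling is omitted.
 *
 *      struct drm_vc4_create_bo create = { .size = 64 * 1024 };
 *      struct drm_vc4_mmap_bo map = { 0 };
 *      void *ptr;
 *
 *      drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *      map.handle = create.handle;
 *      drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *      ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, map.offset);
 */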
/**
 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
 * shader BOs.
 *
 * Since allowing a shader to be overwritten while it's also being
 * executed from would allow privilege escalation, shaders must be
 * created using this ioctl, and they can't be mmapped later.
 */
struct drm_vc4_create_shader_bo {
        /* Size of the data argument. */
        __u32 size;
        /* Flags, currently must be 0. */
        __u32 flags;

        /* Pointer to the data. */
        __u64 data;

        /** Returned GEM handle for the BO. */
        __u32 handle;
        /* Pad, must be 0. */
        __u32 pad;
};
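/* Illustrative sketch (not part of the UAPI): uploading assembled QPU code as
 * a shader BO, assuming `fd` is an open vc4 DRM fd and `qpu_insts` /
 * `qpu_size` hold the shader instructions.  The kernel validates the code and
 * returns a handle that shader records in a later submit can reference; the
 * BO cannot be mapped back afterwards.  Error handling is omitted.
 *
 *      struct drm_vc4_create_shader_bo shader = {
 *              .size = qpu_size,
 *              .data = (uintptr_t)qpu_insts,
 *      };
 *
 *      drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &shader);
 *      fs_handle = shader.handle;
 */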
struct drm_vc4_get_hang_state_bo {
        __u32 handle;
        __u32 paddr;
        __u32 size;
        __u32 pad;
};

/**
 * struct drm_vc4_get_hang_state - ioctl argument for collecting state
 * from a GPU hang for analysis.
 */
struct drm_vc4_get_hang_state {
        /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
        __u64 bo;
        /**
         * On input, the size of the bo array.  Output is the number
         * of bos to be returned.
         */
        __u32 bo_count;

        __u32 start_bin, start_render;

        __u32 ct0ca, ct0ea;
        __u32 ct1ca, ct1ea;
        __u32 ct0cs, ct1cs;
        __u32 ct0ra0, ct1ra0;

        __u32 bpca, bpcs;
        __u32 bpoa, bpos;

        __u32 vpmbase;

        __u32 dbge;
        __u32 fdbgo;
        __u32 fdbgb;
        __u32 fdbgr;
        __u32 fdbgs;
        __u32 errstat;

        /* Pad that we may save more registers into in the future. */
        __u32 pad[16];
};

#define DRM_VC4_PARAM_V3D_IDENT0                0
#define DRM_VC4_PARAM_V3D_IDENT1                1
#define DRM_VC4_PARAM_V3D_IDENT2                2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES         3
#define DRM_VC4_PARAM_SUPPORTS_ETC1             4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS      5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER  6
#define DRM_VC4_PARAM_SUPPORTS_MADVISE          7
#define DRM_VC4_PARAM_SUPPORTS_PERFMON          8

struct drm_vc4_get_param {
        __u32 param;
        __u32 pad;
        __u64 value;
};
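/* Illustrative sketch (not part of the UAPI): probing an optional feature
 * before relying on it, assuming `fd` is an open vc4 DRM fd.  A successful
 * ioctl with a non-zero value would indicate the kernel honours the fixed
 * RCL order flags accepted by DRM_IOCTL_VC4_SUBMIT_CL; a failed ioctl is
 * treated as the feature being absent.
 *
 *      struct drm_vc4_get_param param = {
 *              .param = DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER,
 *      };
 *      int has_fixed_rcl_order;
 *
 *      has_fixed_rcl_order = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &param) == 0 &&
 *                            param.value != 0;
 */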
struct drm_vc4_get_tiling {
        __u32 handle;
        __u32 flags;
        __u64 modifier;
};

struct drm_vc4_set_tiling {
        __u32 handle;
        __u32 flags;
        __u64 modifier;
};

/**
 * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
 */
struct drm_vc4_label_bo {
        __u32 handle;
        __u32 len;
        __u64 name;
};

/*
 * States prefixed with '__' are internal states and cannot be passed to the
 * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
 */
#define VC4_MADV_WILLNEED                       0
#define VC4_MADV_DONTNEED                       1
#define __VC4_MADV_PURGED                       2
#define __VC4_MADV_NOTSUPP                      3

struct drm_vc4_gem_madvise {
        __u32 handle;
        __u32 madv;
        __u32 retained;
        __u32 pad;
};

enum {
        VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
        VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
        VC4_PERFCNT_FEP_CLIPPED_QUADS,
        VC4_PERFCNT_FEP_VALID_QUADS,
        VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
        VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
        VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
        VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
        VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
        VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
        VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
        VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
        VC4_PERFCNT_PSE_PRIMS_REVERSED,
        VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
        VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
        VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
        VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
        VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
        VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
        VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
        VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
        VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
        VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
        VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
        VC4_PERFCNT_NUM_EVENTS,
};

#define DRM_VC4_MAX_PERF_COUNTERS       16

struct drm_vc4_perfmon_create {
        __u32 id;
        __u32 ncounters;
        __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
};

struct drm_vc4_perfmon_destroy {
        __u32 id;
};

/*
 * Returns the values of the performance counters tracked by this
 * perfmon (as an array of ncounters u64 values).
 *
 * No implicit synchronization is performed, so the user has to
 * guarantee that any jobs using this perfmon have already been
 * completed (probably by blocking on the seqno returned by the
 * last exec that used the perfmon).
 */
struct drm_vc4_perfmon_get_values {
        __u32 id;
        __u64 values_ptr;
};
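/* Illustrative sketch (not part of the UAPI): counting fragment-shading clock
 * cycles for one job, assuming `fd` is an open vc4 DRM fd and `submit` is a
 * filled-in struct drm_vc4_submit_cl.  As noted above, the values are only
 * meaningful once every job using the perfmon has completed, so the sketch
 * waits on the returned seqno first.  Error handling is omitted.
 *
 *      __u64 counters[1];
 *      struct drm_vc4_perfmon_create create = {
 *              .ncounters = 1,
 *              .events = { VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING },
 *      };
 *      struct drm_vc4_perfmon_get_values get = {
 *              .values_ptr = (uintptr_t)counters,
 *      };
 *      struct drm_vc4_perfmon_destroy destroy = { 0 };
 *      struct drm_vc4_wait_seqno wait = { .timeout_ns = ~0ull };
 *
 *      drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);
 *      submit.perfmonid = create.id;
 *      drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *      wait.seqno = submit.seqno;
 *      drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 *      get.id = create.id;
 *      drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
 *      destroy.id = create.id;
 *      drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
 */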
#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_VC4_DRM_H_ */