/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
 *
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#ifndef __AMDGPU_DRM_H__
#define __AMDGPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_AMDGPU_GEM_CREATE		0x00
#define DRM_AMDGPU_GEM_MMAP		0x01
#define DRM_AMDGPU_CTX			0x02
#define DRM_AMDGPU_BO_LIST		0x03
#define DRM_AMDGPU_CS			0x04
#define DRM_AMDGPU_INFO			0x05
#define DRM_AMDGPU_GEM_METADATA		0x06
#define DRM_AMDGPU_GEM_WAIT_IDLE	0x07
#define DRM_AMDGPU_GEM_VA		0x08
#define DRM_AMDGPU_WAIT_CS		0x09
#define DRM_AMDGPU_GEM_OP		0x10
#define DRM_AMDGPU_GEM_USERPTR		0x11
#define DRM_AMDGPU_WAIT_FENCES		0x12
#define DRM_AMDGPU_VM			0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE	0x14
#define DRM_AMDGPU_SCHED		0x15

#define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
#define DRM_IOCTL_AMDGPU_CTX		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
#define DRM_IOCTL_AMDGPU_BO_LIST	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
#define DRM_IOCTL_AMDGPU_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
#define DRM_IOCTL_AMDGPU_INFO		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
#define DRM_IOCTL_AMDGPU_GEM_METADATA	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
#define DRM_IOCTL_AMDGPU_GEM_VA		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
#define DRM_IOCTL_AMDGPU_WAIT_CS	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_VM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)

/**
 * DOC: memory domains
 *
 * %AMDGPU_GEM_DOMAIN_CPU	System memory that is not GPU accessible.
 * Memory in this pool could be swapped out to disk if there is pressure.
 *
 * %AMDGPU_GEM_DOMAIN_GTT	GPU accessible system memory, mapped into the
 * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
 * pages of system memory, allowing the GPU to access system memory in a
 * linearized fashion.
 *
 * %AMDGPU_GEM_DOMAIN_VRAM	Local video memory. For APUs, it is memory
 * carved out by the BIOS.
 *
 * %AMDGPU_GEM_DOMAIN_GDS	Global on-chip data storage used to share data
 * across shader threads.
 *
 * %AMDGPU_GEM_DOMAIN_GWS	Global wave sync, used to synchronize the
 * execution of all the waves on a device.
 *
 * %AMDGPU_GEM_DOMAIN_OA	Ordered append, used by 3D or Compute engines
 * for appending data.
 */
#define AMDGPU_GEM_DOMAIN_CPU		0x1
#define AMDGPU_GEM_DOMAIN_GTT		0x2
#define AMDGPU_GEM_DOMAIN_VRAM		0x4
#define AMDGPU_GEM_DOMAIN_GDS		0x8
#define AMDGPU_GEM_DOMAIN_GWS		0x10
#define AMDGPU_GEM_DOMAIN_OA		0x20
#define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
					 AMDGPU_GEM_DOMAIN_GTT | \
					 AMDGPU_GEM_DOMAIN_VRAM | \
					 AMDGPU_GEM_DOMAIN_GDS | \
					 AMDGPU_GEM_DOMAIN_GWS | \
					 AMDGPU_GEM_DOMAIN_OA)

/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
/* Flag that CPU access will not work, this VRAM domain is invisible */
#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS		(1 << 1)
/* Flag that USWC attributes should be used for GTT */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
/* Flag to create a shadow BO (in GTT) while allocating the VRAM BO */
#define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS	(1 << 5)
/* Flag that BO is always valid in this VM */
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
 * for the second page onward should be set to NC. It should never
 * be used by user space applications.
 */
#define AMDGPU_GEM_CREATE_CP_MQD_GFX9		(1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
 * releasing the memory
 */
#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE	(1 << 9)
/* Flag that BO will be encrypted and that the TMZ bit should be
 * set in the PTEs when mapping this buffer via GPUVM or
 * accessing it with various hw blocks
 */
#define AMDGPU_GEM_CREATE_ENCRYPTED		(1 << 10)

struct drm_amdgpu_gem_create_in  {
	/** the requested memory size */
	__u64 bo_size;
	/** physical start_addr alignment in bytes for some HW requirements */
	__u64 alignment;
	/** the requested memory domains */
	__u64 domains;
	/** allocation flags */
	__u64 domain_flags;
};

struct drm_amdgpu_gem_create_out  {
	/** returned GEM object handle */
	__u32 handle;
	__u32 _pad;
};

union drm_amdgpu_gem_create {
	struct drm_amdgpu_gem_create_in		in;
	struct drm_amdgpu_gem_create_out	out;
};
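/*
 * Example: allocating a 1 MiB GTT buffer with DRM_IOCTL_AMDGPU_GEM_CREATE.
 * A minimal userspace sketch, not part of the UAPI; it assumes `fd` is an
 * open amdgpu device node (e.g. /dev/dri/renderD128) and that <string.h>,
 * <stdint.h>, <sys/ioctl.h> and this header are included:
 *
 *	union drm_amdgpu_gem_create args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.bo_size = 1 << 20;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) == 0)
 *		... args.out.handle is the new GEM handle ...
 *
 * On success the kernel overwrites the union with the output view, so the
 * handle is read from args.out after the call.
 */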
/** Opcode to create new residency list */
#define AMDGPU_BO_LIST_OP_CREATE	0
/** Opcode to destroy previously created residency list */
#define AMDGPU_BO_LIST_OP_DESTROY	1
/** Opcode to update resource information in the list */
#define AMDGPU_BO_LIST_OP_UPDATE	2

struct drm_amdgpu_bo_list_in {
	/** Type of operation */
	__u32 operation;
	/** Handle of list or 0 if we want to create one */
	__u32 list_handle;
	/** Number of BOs in list */
	__u32 bo_number;
	/** Size of each element describing BO */
	__u32 bo_info_size;
	/** Pointer to array describing BOs */
	__u64 bo_info_ptr;
};

struct drm_amdgpu_bo_list_entry {
	/** Handle of BO */
	__u32 bo_handle;
	/** New (if specified) BO priority to be used during migration */
	__u32 bo_priority;
};

struct drm_amdgpu_bo_list_out {
	/** Handle of resource list */
	__u32 list_handle;
	__u32 _pad;
};

union drm_amdgpu_bo_list {
	struct drm_amdgpu_bo_list_in in;
	struct drm_amdgpu_bo_list_out out;
};
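/*
 * Example: building a residency list for two BOs with
 * DRM_IOCTL_AMDGPU_BO_LIST. A userspace sketch; bo0 and bo1 are assumed to
 * be handles from earlier GEM_CREATE calls:
 *
 *	struct drm_amdgpu_bo_list_entry entries[2] = {
 *		{ .bo_handle = bo0, .bo_priority = 0 },
 *		{ .bo_handle = bo1, .bo_priority = 0 },
 *	};
 *	union drm_amdgpu_bo_list args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
 *	args.in.bo_number = 2;
 *	args.in.bo_info_size = sizeof(entries[0]);
 *	args.in.bo_info_ptr = (__u64)(uintptr_t)entries;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args);
 *
 * On success, args.out.list_handle can be passed as bo_list_handle in a
 * command submission.
 */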
/* context related */
#define AMDGPU_CTX_OP_ALLOC_CTX	1
#define AMDGPU_CTX_OP_FREE_CTX	2
#define AMDGPU_CTX_OP_QUERY_STATE	3
#define AMDGPU_CTX_OP_QUERY_STATE2	4

/* GPU reset status */
#define AMDGPU_CTX_NO_RESET		0
/* this context caused it */
#define AMDGPU_CTX_GUILTY_RESET		1
/* some other context caused it */
#define AMDGPU_CTX_INNOCENT_RESET	2
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET	3

/* indicate gpu reset occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET    (1<<0)
/* indicate vram lost occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once caused a gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY   (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE   (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE   (1<<4)

/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET       -2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW    -1023
#define AMDGPU_CTX_PRIORITY_LOW         -512
#define AMDGPU_CTX_PRIORITY_NORMAL      0
/*
 * When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
 * CAP_SYS_NICE or DRM_MASTER
 */
#define AMDGPU_CTX_PRIORITY_HIGH        512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH   1023

struct drm_amdgpu_ctx_in {
	/** AMDGPU_CTX_OP_* */
	__u32	op;
	/** For future use, no flags defined so far */
	__u32	flags;
	__u32	ctx_id;
	/** AMDGPU_CTX_PRIORITY_* */
	__s32	priority;
};

union drm_amdgpu_ctx_out {
	struct {
		__u32	ctx_id;
		__u32	_pad;
	} alloc;

	struct {
		/** For future use, no flags defined so far */
		__u64	flags;
		/** Number of resets caused by this context so far. */
		__u32	hangs;
		/** Reset status since the last call of the ioctl. */
		__u32	reset_status;
	} state;
};

union drm_amdgpu_ctx {
	struct drm_amdgpu_ctx_in in;
	union drm_amdgpu_ctx_out out;
};
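/*
 * Example: creating a context and later querying its reset state. A
 * userspace sketch with the same assumptions as the earlier examples
 * (open `fd`, standard headers):
 *
 *	union drm_amdgpu_ctx args;
 *	__u32 ctx_id;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *	args.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
 *	ctx_id = args.out.alloc.ctx_id;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
 *	args.in.ctx_id = ctx_id;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
 *	if (args.out.state.flags & AMDGPU_CTX_QUERY2_FLAGS_RESET)
 *		... a GPU reset happened after the context was created ...
 */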
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID	1
#define AMDGPU_VM_OP_UNRESERVE_VMID	2

struct drm_amdgpu_vm_in {
	/** AMDGPU_VM_OP_* */
	__u32	op;
	__u32	flags;
};

struct drm_amdgpu_vm_out {
	/** For future use, no flags defined so far */
	__u64	flags;
};

union drm_amdgpu_vm {
	struct drm_amdgpu_vm_in in;
	struct drm_amdgpu_vm_out out;
};

/* sched ioctl */
#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE	1
#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE	2

struct drm_amdgpu_sched_in {
	/* AMDGPU_SCHED_OP_* */
	__u32	op;
	__u32	fd;
	/** AMDGPU_CTX_PRIORITY_* */
	__s32	priority;
	__u32	ctx_id;
};

union drm_amdgpu_sched {
	struct drm_amdgpu_sched_in in;
};

/*
 * This is not a reliable API and you should expect it to fail for any
 * number of reasons and have a fallback path that does not use userptr to
 * perform any operation.
 */
#define AMDGPU_GEM_USERPTR_READONLY	(1 << 0)
#define AMDGPU_GEM_USERPTR_ANONONLY	(1 << 1)
#define AMDGPU_GEM_USERPTR_VALIDATE	(1 << 2)
#define AMDGPU_GEM_USERPTR_REGISTER	(1 << 3)

struct drm_amdgpu_gem_userptr {
	__u64		addr;
	__u64		size;
	/* AMDGPU_GEM_USERPTR_* */
	__u32		flags;
	/* Resulting GEM handle */
	__u32		handle;
};
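/*
 * Example: wrapping anonymous user memory in a GEM object. An illustrative
 * sketch only; the buffer must stay valid while the GPU uses it, and, per
 * the comment above, callers are expected to handle failure gracefully:
 *
 *	void *buf = aligned_alloc(4096, 1 << 20);
 *	struct drm_amdgpu_gem_userptr args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.addr = (__u64)(uintptr_t)buf;
 *	args.size = 1 << 20;
 *	args.flags = AMDGPU_GEM_USERPTR_ANONONLY |
 *		     AMDGPU_GEM_USERPTR_REGISTER |
 *		     AMDGPU_GEM_USERPTR_VALIDATE;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args) == 0)
 *		... args.handle now names the user pages ...
 */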
/* SI-CI-VI: */
/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT			0
#define AMDGPU_TILING_ARRAY_MODE_MASK			0xf
#define AMDGPU_TILING_PIPE_CONFIG_SHIFT			4
#define AMDGPU_TILING_PIPE_CONFIG_MASK			0x1f
#define AMDGPU_TILING_TILE_SPLIT_SHIFT			9
#define AMDGPU_TILING_TILE_SPLIT_MASK			0x7
#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT		12
#define AMDGPU_TILING_MICRO_TILE_MODE_MASK		0x7
#define AMDGPU_TILING_BANK_WIDTH_SHIFT			15
#define AMDGPU_TILING_BANK_WIDTH_MASK			0x3
#define AMDGPU_TILING_BANK_HEIGHT_SHIFT			17
#define AMDGPU_TILING_BANK_HEIGHT_MASK			0x3
#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT		19
#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK		0x3
#define AMDGPU_TILING_NUM_BANKS_SHIFT			21
#define AMDGPU_TILING_NUM_BANKS_MASK			0x3

/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT		0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK			0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT		5
#define AMDGPU_TILING_DCC_OFFSET_256B_MASK		0xFFFFFF
#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT		29
#define AMDGPU_TILING_DCC_PITCH_MAX_MASK		0x3FFF
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT		43
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK		0x1
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT	44
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK		0x1
#define AMDGPU_TILING_SCANOUT_SHIFT			63
#define AMDGPU_TILING_SCANOUT_MASK			0x1

/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
	(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
#define AMDGPU_TILING_GET(value, field) \
	(((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
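/*
 * Example: packing and unpacking GFX9+ tiling flags with the helpers above.
 * The field values here are made up for illustration:
 *
 *	__u64 tiling = AMDGPU_TILING_SET(SWIZZLE_MODE, 9) |
 *		       AMDGPU_TILING_SET(SCANOUT, 1);
 *	__u64 swizzle = AMDGPU_TILING_GET(tiling, SWIZZLE_MODE);
 *
 * Each value is masked before shifting, so out-of-range input bits are
 * dropped rather than corrupting neighbouring fields.
 */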
#define AMDGPU_GEM_METADATA_OP_SET_METADATA	1
#define AMDGPU_GEM_METADATA_OP_GET_METADATA	2

/** The same structure is shared for input/output */
struct drm_amdgpu_gem_metadata {
	/** GEM Object handle */
	__u32	handle;
	/** Do we want get or set metadata */
	__u32	op;
	struct {
		/** For future use, no flags defined so far */
		__u64	flags;
		/** family specific tiling info */
		__u64	tiling_info;
		__u32	data_size_bytes;
		__u32	data[64];
	} data;
};

struct drm_amdgpu_gem_mmap_in {
	/** the GEM object handle */
	__u32 handle;
	__u32 _pad;
};

struct drm_amdgpu_gem_mmap_out {
	/** mmap offset from the vma offset manager */
	__u64 addr_ptr;
};

union drm_amdgpu_gem_mmap {
	struct drm_amdgpu_gem_mmap_in   in;
	struct drm_amdgpu_gem_mmap_out out;
};

struct drm_amdgpu_gem_wait_idle_in {
	/** GEM object handle */
	__u32 handle;
	/** For future use, no flags defined so far */
	__u32 flags;
	/** Absolute timeout to wait */
	__u64 timeout;
};

struct drm_amdgpu_gem_wait_idle_out {
	/** BO status: 0 - BO is idle, 1 - BO is busy */
	__u32 status;
	/** Returned current memory domain */
	__u32 domain;
};

union drm_amdgpu_gem_wait_idle {
	struct drm_amdgpu_gem_wait_idle_in  in;
	struct drm_amdgpu_gem_wait_idle_out out;
};
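/*
 * Example: CPU-mapping a GEM BO. The ioctl only returns a fake offset into
 * the DRM file; the actual mapping is created with mmap(2). A sketch;
 * `size` must match the BO size requested at creation:
 *
 *	union drm_amdgpu_gem_mmap args;
 *	void *cpu;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.handle = handle;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args);
 *	cpu = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, args.out.addr_ptr);
 */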
struct drm_amdgpu_wait_cs_in {
	/* Command submission handle
	 * handle equals 0 means none to wait for
	 * handle equals ~0ull means wait for the latest sequence number
	 */
	__u64 handle;
	/** Absolute timeout to wait */
	__u64 timeout;
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u32 ctx_id;
};

struct drm_amdgpu_wait_cs_out {
	/** CS status: 0 - CS completed, 1 - CS still busy */
	__u64 status;
};

union drm_amdgpu_wait_cs {
	struct drm_amdgpu_wait_cs_in in;
	struct drm_amdgpu_wait_cs_out out;
};

struct drm_amdgpu_fence {
	__u32 ctx_id;
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u64 seq_no;
};

struct drm_amdgpu_wait_fences_in {
	/** This points to uint64_t * which points to fences */
	__u64 fences;
	__u32 fence_count;
	__u32 wait_all;
	__u64 timeout_ns;
};

struct drm_amdgpu_wait_fences_out {
	__u32 status;
	__u32 first_signaled;
};

union drm_amdgpu_wait_fences {
	struct drm_amdgpu_wait_fences_in in;
	struct drm_amdgpu_wait_fences_out out;
};
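/*
 * Example: waiting on two previously submitted fences. A sketch under the
 * assumption that `fences` carries a user pointer to an array of
 * struct drm_amdgpu_fence entries (f0 and f1 here), each identifying a
 * submission by context, IP and sequence number. wait_all = 1 waits for
 * every fence rather than the first; the timeout is in nanoseconds:
 *
 *	struct drm_amdgpu_fence fences[2] = { f0, f1 };
 *	union drm_amdgpu_wait_fences args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.fences = (__u64)(uintptr_t)fences;
 *	args.in.fence_count = 2;
 *	args.in.wait_all = 1;
 *	args.in.timeout_ns = 1000000000;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
 *	... args.out.status reports the wait result; with wait_all = 0,
 *	    first_signaled identifies which fence signaled first ...
 */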
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
#define AMDGPU_GEM_OP_SET_PLACEMENT		1

/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
	/** GEM object handle */
	__u32	handle;
	/** AMDGPU_GEM_OP_* */
	__u32	op;
	/** Input or return value */
	__u64	value;
};

#define AMDGPU_VA_OP_MAP			1
#define AMDGPU_VA_OP_UNMAP			2
#define AMDGPU_VA_OP_CLEAR			3
#define AMDGPU_VA_OP_REPLACE			4

/* Delay the page table update till the next CS */
#define AMDGPU_VM_DELAY_UPDATE		(1 << 0)

/* Mapping flags */
/* readable mapping */
#define AMDGPU_VM_PAGE_READABLE		(1 << 1)
/* writable mapping */
#define AMDGPU_VM_PAGE_WRITEABLE	(1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE	(1 << 3)
/* partially resident texture */
#define AMDGPU_VM_PAGE_PRT		(1 << 4)
/* MTYPE flags use bit 5 to 8 */
#define AMDGPU_VM_MTYPE_MASK		(0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT		(0 << 5)
/* Use Non Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC		(1 << 5)
/* Use Write Combine MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC		(2 << 5)
/* Use Cache Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC		(3 << 5)
/* Use UnCached MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC		(4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW		(5 << 5)

struct drm_amdgpu_gem_va {
	/** GEM object handle */
	__u32 handle;
	__u32 _pad;
	/** AMDGPU_VA_OP_* */
	__u32 operation;
	/** AMDGPU_VM_PAGE_* */
	__u32 flags;
	/** VA address to assign. Must be correctly aligned. */
	__u64 va_address;
	/** Specify offset inside of BO to assign. Must be correctly aligned. */
	__u64 offset_in_bo;
	/** Specify mapping size. Must be correctly aligned. */
	__u64 map_size;
};
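/*
 * Example: mapping a whole BO read/write into the GPU virtual address
 * space. A sketch; `va` is an address the caller manages inside the range
 * advertised by AMDGPU_INFO_DEV_INFO (virtual_address_offset..max):
 *
 *	struct drm_amdgpu_gem_va args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.handle = handle;
 *	args.operation = AMDGPU_VA_OP_MAP;
 *	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *	args.va_address = va;
 *	args.offset_in_bo = 0;
 *	args.map_size = bo_size;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args);
 *
 * Unmapping uses AMDGPU_VA_OP_UNMAP with the same address and size.
 */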
#define AMDGPU_HW_IP_GFX          0
#define AMDGPU_HW_IP_COMPUTE      1
#define AMDGPU_HW_IP_DMA          2
#define AMDGPU_HW_IP_UVD          3
#define AMDGPU_HW_IP_VCE          4
#define AMDGPU_HW_IP_UVD_ENC      5
#define AMDGPU_HW_IP_VCN_DEC      6
#define AMDGPU_HW_IP_VCN_ENC      7
#define AMDGPU_HW_IP_VCN_JPEG     8
#define AMDGPU_HW_IP_NUM          9

#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1

#define AMDGPU_CHUNK_ID_IB		0x01
#define AMDGPU_CHUNK_ID_FENCE		0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES	0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT    0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL  0x09

struct drm_amdgpu_cs_chunk {
	__u32		chunk_id;
	__u32		length_dw;
	__u64		chunk_data;
};

struct drm_amdgpu_cs_in {
	/** Rendering context id */
	__u32		ctx_id;
	/** Handle of resource list associated with CS */
	__u32		bo_list_handle;
	__u32		num_chunks;
	__u32		flags;
	/** this points to __u64 * which point to cs chunks */
	__u64		chunks;
};

struct drm_amdgpu_cs_out {
	__u64 handle;
};

union drm_amdgpu_cs {
	struct drm_amdgpu_cs_in in;
	struct drm_amdgpu_cs_out out;
};

/* Specify flags to be used for IB */

/* This IB should be submitted to CE */
#define AMDGPU_IB_FLAG_CE	(1<<0)

/* Preamble flag, which means the IB could be dropped if no context switch */
#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)

/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)

/* The IB fence should do the L2 writeback but not invalidate any shader
 * caches (L2/vL1/sL1/I$).
 */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)

/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
 * This will reset wave ID counters for the IB.
 */
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)

/* Flag the IB as secure (TMZ) */
#define AMDGPU_IB_FLAGS_SECURE  (1 << 5)

/* Tell KMD to flush and invalidate caches */
#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC  (1 << 6)

struct drm_amdgpu_cs_chunk_ib {
	__u32 _pad;
	/** AMDGPU_IB_FLAG_* */
	__u32 flags;
	/** Virtual address to begin IB execution */
	__u64 va_start;
	/** Size of submission */
	__u32 ib_bytes;
	/** HW IP to submit to */
	__u32 ip_type;
	/** HW IP index of the same type to submit to */
	__u32 ip_instance;
	/** Ring index to submit to */
	__u32 ring;
};

struct drm_amdgpu_cs_chunk_dep {
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u32 ctx_id;
	__u64 handle;
};

struct drm_amdgpu_cs_chunk_fence {
	__u32 handle;
	__u32 offset;
};

struct drm_amdgpu_cs_chunk_sem {
	__u32 handle;
};

struct drm_amdgpu_cs_chunk_syncobj {
	__u32 handle;
	__u32 flags;
	__u64 point;
};

#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2

union drm_amdgpu_fence_to_handle {
	struct {
		struct drm_amdgpu_fence fence;
		__u32 what;
		__u32 pad;
	} in;
	struct {
		__u32 handle;
	} out;
};

struct drm_amdgpu_cs_chunk_data {
	union {
		struct drm_amdgpu_cs_chunk_ib		ib_data;
		struct drm_amdgpu_cs_chunk_fence	fence_data;
	};
};
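/*
 * Example: submitting a single IB on the GFX ring. A condensed sketch of
 * the two-level chunk layout (the `chunks` field points to an array of
 * pointers to chunks); error handling and BO residency setup are omitted,
 * and `ib_va`/`ib_bytes` are assumed to describe an IB previously written
 * into a GPU-mapped BO:
 *
 *	struct drm_amdgpu_cs_chunk_data chunk_data;
 *	struct drm_amdgpu_cs_chunk chunk;
 *	__u64 chunk_ptrs[1];
 *	union drm_amdgpu_cs cs;
 *
 *	memset(&chunk_data, 0, sizeof(chunk_data));
 *	chunk_data.ib_data.va_start = ib_va;
 *	chunk_data.ib_data.ib_bytes = ib_bytes;
 *	chunk_data.ib_data.ip_type = AMDGPU_HW_IP_GFX;
 *
 *	chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
 *	chunk.length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
 *	chunk.chunk_data = (__u64)(uintptr_t)&chunk_data;
 *	chunk_ptrs[0] = (__u64)(uintptr_t)&chunk;
 *
 *	memset(&cs, 0, sizeof(cs));
 *	cs.in.ctx_id = ctx_id;
 *	cs.in.bo_list_handle = bo_list;
 *	cs.in.num_chunks = 1;
 *	cs.in.chunks = (__u64)(uintptr_t)chunk_ptrs;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
 *
 * cs.out.handle is the sequence number that DRM_IOCTL_AMDGPU_WAIT_CS and
 * struct drm_amdgpu_fence refer to.
 */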
/**
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
 *
 */
#define AMDGPU_IDS_FLAGS_FUSION         0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION     0x2
#define AMDGPU_IDS_FLAGS_TMZ            0x4

/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING		0x00
/* get the crtc_id from the mode object id */
#define AMDGPU_INFO_CRTC_FROM_ID		0x01
/* query hw IP info */
#define AMDGPU_INFO_HW_IP_INFO			0x02
/* query hw IP instance count for the specified type */
#define AMDGPU_INFO_HW_IP_COUNT			0x03
/* timestamp for GL_ARB_timer_query */
#define AMDGPU_INFO_TIMESTAMP			0x05
/* Query the firmware version */
#define AMDGPU_INFO_FW_VERSION			0x0e
	/* Subquery id: Query VCE firmware version */
	#define AMDGPU_INFO_FW_VCE		0x1
	/* Subquery id: Query UVD firmware version */
	#define AMDGPU_INFO_FW_UVD		0x2
	/* Subquery id: Query GMC firmware version */
	#define AMDGPU_INFO_FW_GMC		0x03
	/* Subquery id: Query GFX ME firmware version */
	#define AMDGPU_INFO_FW_GFX_ME		0x04
	/* Subquery id: Query GFX PFP firmware version */
	#define AMDGPU_INFO_FW_GFX_PFP		0x05
	/* Subquery id: Query GFX CE firmware version */
	#define AMDGPU_INFO_FW_GFX_CE		0x06
	/* Subquery id: Query GFX RLC firmware version */
	#define AMDGPU_INFO_FW_GFX_RLC		0x07
	/* Subquery id: Query GFX MEC firmware version */
	#define AMDGPU_INFO_FW_GFX_MEC		0x08
	/* Subquery id: Query SMC firmware version */
	#define AMDGPU_INFO_FW_SMC		0x0a
	/* Subquery id: Query SDMA firmware version */
	#define AMDGPU_INFO_FW_SDMA		0x0b
	/* Subquery id: Query PSP SOS firmware version */
	#define AMDGPU_INFO_FW_SOS		0x0c
	/* Subquery id: Query PSP ASD firmware version */
	#define AMDGPU_INFO_FW_ASD		0x0d
	/* Subquery id: Query VCN firmware version */
	#define AMDGPU_INFO_FW_VCN		0x0e
	/* Subquery id: Query GFX RLC SRLC firmware version */
	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
	/* Subquery id: Query GFX RLC SRLG firmware version */
	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
	/* Subquery id: Query GFX RLC SRLS firmware version */
	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
	/* Subquery id: Query DMCU firmware version */
	#define AMDGPU_INFO_FW_DMCU		0x12
	#define AMDGPU_INFO_FW_TA		0x13
	/* Subquery id: Query DMCUB firmware version */
	#define AMDGPU_INFO_FW_DMCUB		0x14

/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
/* the used VRAM size */
#define AMDGPU_INFO_VRAM_USAGE			0x10
/* the used GTT size */
#define AMDGPU_INFO_GTT_USAGE			0x11
/* Information about GDS, etc. resource configuration */
#define AMDGPU_INFO_GDS_CONFIG			0x13
/* Query information about VRAM and GTT domains */
#define AMDGPU_INFO_VRAM_GTT			0x14
/* Query information about registers in MMR address space */
#define AMDGPU_INFO_READ_MMR_REG		0x15
/* Query information about device: rev id, family, etc. */
#define AMDGPU_INFO_DEV_INFO			0x16
/* visible vram usage */
#define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS		0x18
/* Query memory about VRAM and GTT domains */
#define AMDGPU_INFO_MEMORY			0x19
/* Query vce clock table */
#define AMDGPU_INFO_VCE_CLOCK_TABLE		0x1A
/* Query vbios related information */
#define AMDGPU_INFO_VBIOS			0x1B
	/* Subquery id: Query vbios size */
	#define AMDGPU_INFO_VBIOS_SIZE		0x1
	/* Subquery id: Query vbios image */
	#define AMDGPU_INFO_VBIOS_IMAGE		0x2
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES			0x1C
/* Query sensor related information */
#define AMDGPU_INFO_SENSOR			0x1D
	/* Subquery id: Query GPU shader clock */
	#define AMDGPU_INFO_SENSOR_GFX_SCLK		0x1
	/* Subquery id: Query GPU memory clock */
	#define AMDGPU_INFO_SENSOR_GFX_MCLK		0x2
	/* Subquery id: Query GPU temperature */
	#define AMDGPU_INFO_SENSOR_GPU_TEMP		0x3
	/* Subquery id: Query GPU load */
	#define AMDGPU_INFO_SENSOR_GPU_LOAD		0x4
	/* Subquery id: Query average GPU power */
	#define AMDGPU_INFO_SENSOR_GPU_AVG_POWER	0x5
	/* Subquery id: Query northbridge voltage */
	#define AMDGPU_INFO_SENSOR_VDDNB		0x6
	/* Subquery id: Query graphics voltage */
	#define AMDGPU_INFO_SENSOR_VDDGFX		0x7
	/* Subquery id: Query GPU stable pstate shader clock */
	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK	0x8
	/* Subquery id: Query GPU stable pstate memory clock */
	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK	0x9
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS	0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER		0x1F
/* query ras mask of enabled features */
#define AMDGPU_INFO_RAS_ENABLED_FEATURES	0x20

/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC			(1 << 0)
/* RAS MASK: SDMA */
#define AMDGPU_INFO_RAS_ENABLED_SDMA			(1 << 1)
/* RAS MASK: GFX */
#define AMDGPU_INFO_RAS_ENABLED_GFX			(1 << 2)
/* RAS MASK: MMHUB */
#define AMDGPU_INFO_RAS_ENABLED_MMHUB			(1 << 3)
/* RAS MASK: ATHUB */
#define AMDGPU_INFO_RAS_ENABLED_ATHUB			(1 << 4)
/* RAS MASK: PCIE */
#define AMDGPU_INFO_RAS_ENABLED_PCIE			(1 << 5)
/* RAS MASK: HDP */
#define AMDGPU_INFO_RAS_ENABLED_HDP			(1 << 6)
/* RAS MASK: XGMI */
#define AMDGPU_INFO_RAS_ENABLED_XGMI			(1 << 7)
/* RAS MASK: DF */
#define AMDGPU_INFO_RAS_ENABLED_DF			(1 << 8)
/* RAS MASK: SMN */
#define AMDGPU_INFO_RAS_ENABLED_SMN			(1 << 9)
/* RAS MASK: SEM */
#define AMDGPU_INFO_RAS_ENABLED_SEM			(1 << 10)
/* RAS MASK: MP0 */
#define AMDGPU_INFO_RAS_ENABLED_MP0			(1 << 11)
/* RAS MASK: MP1 */
#define AMDGPU_INFO_RAS_ENABLED_MP1			(1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE			(1 << 13)

#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT	8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK	0xff

struct drm_amdgpu_query_fw {
	/** AMDGPU_INFO_FW_* */
	__u32 fw_type;
	/**
	 * Index of the IP if there are more IPs of
	 * the same type.
	 */
	__u32 ip_instance;
	/**
	 * Index of the engine. Whether this is used depends
	 * on the firmware type. (e.g. MEC, SDMA)
	 */
	__u32 index;
	__u32 _pad;
};

/* Input structure for the INFO ioctl */
struct drm_amdgpu_info {
	/* Where the return value will be stored */
	__u64 return_pointer;
	/* The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write. */
	__u32 return_size;
	/* The query request id. */
	__u32 query;

	union {
		struct {
			__u32 id;
			__u32 _pad;
		} mode_crtc;

		struct {
			/** AMDGPU_HW_IP_* */
			__u32 type;
			/**
			 * Index of the IP if there are more IPs of the same
			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
			 */
			__u32 ip_instance;
		} query_hw_ip;

		struct {
			__u32 dword_offset;
			/** number of registers to read */
			__u32 count;
			__u32 instance;
			/** For future use, no flags defined so far */
			__u32 flags;
		} read_mmr_reg;

		struct drm_amdgpu_query_fw query_fw;

		struct {
			__u32 type;
			__u32 offset;
		} vbios_info;

		struct {
			__u32 type;
		} sensor_info;
	};
};
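/*
 * Example: querying device information. The INFO ioctl writes its result
 * through return_pointer, capped at return_size bytes. Sketch only, same
 * assumptions as the earlier examples:
 *
 *	struct drm_amdgpu_info_device dev_info;
 *	struct drm_amdgpu_info request;
 *
 *	memset(&request, 0, sizeof(request));
 *	request.return_pointer = (__u64)(uintptr_t)&dev_info;
 *	request.return_size = sizeof(dev_info);
 *	request.query = AMDGPU_INFO_DEV_INFO;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
 *	... dev_info.family, dev_info.ids_flags, etc. are now valid ...
 *
 * Note that this ioctl is DRM_IOW: all results come back via
 * return_pointer, not via the argument structure itself.
 */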
struct drm_amdgpu_info_gds {
	/** GDS GFX partition size */
	__u32 gds_gfx_partition_size;
	/** GDS compute partition size */
	__u32 compute_partition_size;
	/** total GDS memory size */
	__u32 gds_total_size;
	/** GWS size per GFX partition */
	__u32 gws_per_gfx_partition;
	/** GWS size per compute partition */
	__u32 gws_per_compute_partition;
	/** OA size per GFX partition */
	__u32 oa_per_gfx_partition;
	/** OA size per compute partition */
	__u32 oa_per_compute_partition;
	__u32 _pad;
};

struct drm_amdgpu_info_vram_gtt {
	__u64 vram_size;
	__u64 vram_cpu_accessible_size;
	__u64 gtt_size;
};

struct drm_amdgpu_heap_info {
	/** max. physical memory */
	__u64 total_heap_size;

	/** Theoretical max. available memory in the given heap */
	__u64 usable_heap_size;

	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * heap_size.
	 */
	__u64 heap_usage;

	/**
	 * Theoretical possible max. size of buffer which
	 * could be allocated in the given heap
	 */
	__u64 max_allocation;
};
struct drm_amdgpu_memory_info {
	struct drm_amdgpu_heap_info vram;
	struct drm_amdgpu_heap_info cpu_accessible_vram;
	struct drm_amdgpu_heap_info gtt;
};

struct drm_amdgpu_info_firmware {
	__u32 ver;
	__u32 feature;
};

#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2  2
#define AMDGPU_VRAM_TYPE_GDDR3 3
#define AMDGPU_VRAM_TYPE_GDDR4 4
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM   6
#define AMDGPU_VRAM_TYPE_DDR3  7
#define AMDGPU_VRAM_TYPE_DDR4  8
#define AMDGPU_VRAM_TYPE_GDDR6 9

struct drm_amdgpu_info_device {
	/** PCI Device ID */
	__u32 device_id;
	/** Internal chip revision: A0, A1, etc. */
	__u32 chip_rev;
	__u32 external_rev;
	/** Revision id in PCI Config space */
	__u32 pci_rev;
	__u32 family;
	__u32 num_shader_engines;
	__u32 num_shader_arrays_per_engine;
	/* in KHz */
	__u32 gpu_counter_freq;
	__u64 max_engine_clock;
	__u64 max_memory_clock;
	/* cu information */
	__u32 cu_active_number;
	/* NOTE: cu_ao_mask is INVALID, DON'T use it */
	__u32 cu_ao_mask;
	__u32 cu_bitmap[4][4];
	/** Render backend pipe mask. One render backend is CB+DB. */
	__u32 enabled_rb_pipes_mask;
	__u32 num_rb_pipes;
	__u32 num_hw_gfx_contexts;
	__u32 _pad;
	__u64 ids_flags;
	/** Starting virtual address for UMDs. */
	__u64 virtual_address_offset;
	/** The maximum virtual address */
	__u64 virtual_address_max;
	/** Required alignment of virtual addresses. */
	__u32 virtual_address_alignment;
	/** Page table entry - fragment size */
	__u32 pte_fragment_size;
	__u32 gart_page_size;
	/** constant engine ram size */
	__u32 ce_ram_size;
	/** video memory type info */
	__u32 vram_type;
	/** video memory bit width */
	__u32 vram_bit_width;
	/* vce harvesting instance */
	__u32 vce_harvest_config;
	/* gfx double offchip LDS buffers */
	__u32 gc_double_offchip_lds_buf;
	/* NGG Primitive Buffer */
	__u64 prim_buf_gpu_addr;
	/* NGG Position Buffer */
	__u64 pos_buf_gpu_addr;
	/* NGG Control Sideband */
	__u64 cntl_sb_buf_gpu_addr;
	/* NGG Parameter Cache */
	__u64 param_buf_gpu_addr;
	__u32 prim_buf_size;
	__u32 pos_buf_size;
	__u32 cntl_sb_buf_size;
	__u32 param_buf_size;
	/* wavefront size */
	__u32 wave_front_size;
	/* shader visible vgprs */
	__u32 num_shader_visible_vgprs;
	/* CU per shader array */
	__u32 num_cu_per_sh;
	/* number of tcc blocks */
	__u32 num_tcc_blocks;
	/* gs vgt table depth */
	__u32 gs_vgt_table_depth;
	/* gs primitive buffer depth */
	__u32 gs_prim_buffer_depth;
	/* max gs wavefront per vgt */
	__u32 max_gs_waves_per_vgt;
	__u32 _pad1;
	/* always on cu bitmap */
	__u32 cu_ao_bitmap[4][4];
	/** Starting high virtual address for UMDs. */
	__u64 high_va_offset;
	/** The maximum high virtual address */
	__u64 high_va_max;
	/* gfx10 pa_sc_tile_steering_override */
	__u32 pa_sc_tile_steering_override;
	/* disabled TCCs */
	__u64 tcc_disabled_mask;
};
struct drm_amdgpu_info_hw_ip {
	/** Version of h/w IP */
	__u32  hw_ip_version_major;
	__u32  hw_ip_version_minor;
	/** Capabilities */
	__u64  capabilities_flags;
	/** command buffer address start alignment */
	__u32  ib_start_alignment;
	/** command buffer size alignment */
	__u32  ib_size_alignment;
	/** Bitmask of available rings. Bit 0 means ring 0, etc. */
	__u32  available_rings;
	__u32  _pad;
};
struct drm_amdgpu_info_num_handles {
	/** Max handles as supported by firmware for UVD */
	__u32  uvd_max_handles;
	/** Handles currently in use for UVD */
	__u32  uvd_used_handles;
};

#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES		6

struct drm_amdgpu_info_vce_clock_table_entry {
	/** System clock */
	__u32 sclk;
	/** Memory clock */
	__u32 mclk;
	/** VCE clock */
	__u32 eclk;
	__u32 pad;
};

struct drm_amdgpu_info_vce_clock_table {
	struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
	__u32 num_valid_entries;
	__u32 pad;
};

/*
 * Supported GPU families
 */
#define AMDGPU_FAMILY_UNKNOWN			0
#define AMDGPU_FAMILY_SI			110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
#define AMDGPU_FAMILY_CI			120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV			125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI			130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ			135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI			141 /* Vega10 */
#define AMDGPU_FAMILY_RV			142 /* Raven */
#define AMDGPU_FAMILY_NV			143 /* Navi10 */

#if defined(__cplusplus)
}
#endif

#endif