/**
 * \file drm.h
 * Header for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * \par Acknowledgments:
 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_H_
#define _DRM_H_

#if defined(__KERNEL__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#elif defined(__linux__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#else /* One of the BSDs */

#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t   __s8;
typedef uint8_t  __u8;
typedef int16_t  __s16;
typedef uint16_t __u16;
typedef int32_t  __s32;
typedef uint32_t __u32;
typedef int64_t  __s64;
typedef uint64_t __u64;
typedef size_t   __kernel_size_t;
typedef unsigned long drm_handle_t;

#endif

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;

/**
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/**
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects;
	struct drm_clip_rect *rects;
};

/**
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer. To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;		/**< Major version */
	int version_minor;		/**< Minor version */
	int version_patchlevel;		/**< Patch level */
	__kernel_size_t name_len;	/**< Length of name buffer */
	char __user *name;		/**< Name of driver */
	__kernel_size_t date_len;	/**< Length of date buffer */
	char __user *date;		/**< User-space buffer to hold date */
	__kernel_size_t desc_len;	/**< Length of desc buffer */
	char __user *desc;		/**< User-space buffer to hold desc */
};
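/*
 * Illustrative usage sketch, not part of the original header: the version
 * ioctl is commonly issued twice -- once with zero-length buffers to learn
 * the string lengths, then again with buffers allocated by the caller.
 * The device path and the missing error handling are assumptions made for
 * brevity.
 *
 *	struct drm_version v = {0};
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// first pass: lengths only
 *	v.name = malloc(v.name_len + 1);
 *	v.date = malloc(v.date_len + 1);
 *	v.desc = malloc(v.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// second pass: fills the buffers
 */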
/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	__kernel_size_t unique_len;	/**< Length of unique */
	char __user *unique;		/**< Unique name for driver instantiation */
};

struct drm_list {
	int count;			/**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id;	/**< Context requesting private mapping */
	void *handle;		/**< Handle of map */
};

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA) */
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	/**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/* Private data */
};

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024 bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	/* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};

/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		/**< Entries in list */
	struct drm_buf_desc __user *list;
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		/**< Index into the master buffer list */
	int total;		/**< Buffer size */
	int used;		/**< Amount of buffer in use (for DMA) */
	void __user *address;	/**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#ifdef __cplusplus
	void __user *virt;
#else
	void __user *virtual;	/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list;	/**< Buffer information */
};

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			/**< Context handle */
	int send_count;			/**< Number of buffers to send */
	int __user *send_indices;	/**< List of handles to buffers */
	int __user *send_sizes;		/**< Lengths of data to send */
	enum drm_dma_flags flags;	/**< Flags */
	int request_count;		/**< Number of buffers requested */
	int request_size;		/**< Desired size for buffers */
	int __user *request_indices;	/**< Buffer information */
	int __user *request_sizes;
	int granted_count;		/**< Number of buffers granted */
};

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};
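/*
 * Illustrative usage sketch, not part of the original header: the classic
 * authentication handshake.  An unprivileged client fetches a magic token and
 * hands it (over some IPC channel, which is assumed here) to the DRM master,
 * which then authenticates it.  Error handling omitted.
 *
 *	// client side
 *	struct drm_auth auth;
 *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
 *	send_magic_to_master(auth.magic);	// hypothetical IPC helper
 *
 *	// master side
 *	struct drm_auth auth = { .magic = received_magic };
 *	ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 */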
/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	/* bits 1-6 are reserved for high crtcs */
	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
	_DRM_VBLANK_EVENT = 0x4000000,	/**< Send event instead of blocking */
	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
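/*
 * Illustrative usage sketch, not part of the original header: block until the
 * next vblank on the first CRTC.  "fd" is an already opened DRM device and
 * error handling is omitted.
 *
 *	union drm_wait_vblank vbl = {0};
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;			// "one vblank from now"
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *	// vbl.reply.sequence and vbl.reply.tval_* now describe that vblank
 */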
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	__u32 crtc;
	__u32 cmd;
};

/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;	/* physical address */
	unsigned long aperture_size;	/* bytes */
	unsigned long memory_allowed;	/* bytes */
	unsigned long memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};

/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
	/** Handle of the object to be closed. */
	__u32 handle;
	__u32 pad;
};

/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
	/** Handle for the object being named */
	__u32 handle;

	/** Returned global name */
	__u32 name;
};

/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
	/** Name of object being opened */
	__u32 name;

	/** Returned handle for the object */
	__u32 handle;

	/** Returned size of the object */
	__u64 size;
};
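/*
 * Illustrative usage sketch, not part of the original header: sharing a GEM
 * object between two processes by global name (the legacy flink mechanism;
 * PRIME file descriptors are the modern alternative).  "handle" is a GEM
 * handle the exporting process already owns; the IPC step and error handling
 * are assumed.
 *
 *	// exporting process
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);		// flink.name is global
 *
 *	// importing process
 *	struct drm_gem_open open_arg = { .name = received_name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);	// open_arg.handle, .size
 */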
#define DRM_CAP_DUMB_BUFFER		0x1
#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
#define DRM_CAP_PRIME			0x5
#define DRM_PRIME_CAP_IMPORT		0x1
#define DRM_PRIME_CAP_EXPORT		0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
/*
 * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
 * combination for the hardware cursor. The intention is that a hardware
 * agnostic userspace can query a cursor plane size to use.
 *
 * Note that the cross-driver contract is to merely return a valid size;
 * drivers are free to attach another meaning on top, e.g. i915 returns the
 * maximum plane size.
 */
#define DRM_CAP_CURSOR_WIDTH		0x8
#define DRM_CAP_CURSOR_HEIGHT		0x9
#define DRM_CAP_ADDFB2_MODIFIERS	0x10
#define DRM_CAP_PAGE_FLIP_TARGET	0x11
#define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
#define DRM_CAP_SYNCOBJ			0x13
#define DRM_CAP_SYNCOBJ_TIMELINE	0x14

/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;
	__u64 value;
};
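/*
 * Illustrative usage sketch, not part of the original header: query whether
 * the device supports dumb buffers before relying on them.  "fd" is an open
 * DRM device; error handling omitted.
 *
 *	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
 *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
 *		;	// dumb buffers are available
 */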
/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo.
 */
#define DRM_CLIENT_CAP_STEREO_3D	1

/**
 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
 *
 * If set to 1, the DRM core will expose all planes (overlay, primary, and
 * cursor) to userspace.
 */
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES	2

/**
 * DRM_CLIENT_CAP_ATOMIC
 *
 * If set to 1, the DRM core will expose atomic properties to userspace.
 */
#define DRM_CLIENT_CAP_ATOMIC	3

/**
 * DRM_CLIENT_CAP_ASPECT_RATIO
 *
 * If set to 1, the DRM core will provide aspect ratio information in modes.
 */
#define DRM_CLIENT_CAP_ASPECT_RATIO	4

/**
 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
 *
 * If set to 1, the DRM core will expose special connectors to be used for
 * writing back to memory the scene setup in the commit. Depends on the client
 * also supporting DRM_CLIENT_CAP_ATOMIC.
 */
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5

/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability;
	__u64 value;
};

#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
	__u32 handle;

	/** Flags; only applicable for handle->fd */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};
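/*
 * Illustrative usage sketch, not part of the original header: export a GEM
 * handle as a PRIME (dma-buf) file descriptor that another device or process
 * can import.  "handle" is an existing GEM handle; error handling omitted.
 *
 *	struct drm_prime_handle prime = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *	// prime.fd now refers to the exported buffer
 */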
struct drm_syncobj_create {
	__u32 handle;
#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
	__u32 flags;
};

struct drm_syncobj_destroy {
	__u32 handle;
	__u32 pad;
};

#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
struct drm_syncobj_handle {
	__u32 handle;
	__u32 flags;

	__s32 fd;
	__u32 pad;
};

struct drm_syncobj_transfer {
	__u32 src_handle;
	__u32 dst_handle;
	__u64 src_point;
	__u64 dst_point;
	__u32 flags;
	__u32 pad;
};

#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
struct drm_syncobj_wait {
	__u64 handles;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
};

struct drm_syncobj_timeline_wait {
	__u64 handles;
	/* wait on a specific timeline point for every handle */
	__u64 points;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
};

struct drm_syncobj_array {
	__u64 handles;
	__u32 count_handles;
	__u32 pad;
};

#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
	__u64 handles;
	__u64 points;
	__u32 count_handles;
	__u32 flags;
};
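/*
 * Illustrative usage sketch, not part of the original header: wait for two
 * binary syncobjs to signal.  The handle values and the deadline are
 * placeholders; note that the ioctl takes the handle array as a __u64
 * user pointer and that the timeout is an absolute time in nanoseconds.
 *
 *	__u32 handles[2] = { handle_a, handle_b };
 *	struct drm_syncobj_wait wait = {0};
 *	wait.handles = (__u64)(uintptr_t)handles;
 *	wait.count_handles = 2;
 *	wait.timeout_nsec = deadline_ns;
 *	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */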
/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
	__u32 crtc_id;		/* requested crtc_id */
	__u32 active;		/* return: crtc output is active */
	__u64 sequence;		/* return: most recent vblank sequence */
	__s64 sequence_ns;	/* return: most recent time of first pixel out */
};

/* Queue event to be delivered at specified sequence. Time stamp marks
 * when the first pixel of the refresh cycle leaves the display engine
 * for the display
 */
#define DRM_CRTC_SEQUENCE_RELATIVE	0x00000001	/* sequence is relative to current */
#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS	0x00000002	/* Use next sequence if we've missed */

struct drm_crtc_queue_sequence {
	__u32 crtc_id;
	__u32 flags;
	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
	__u64 user_data;	/* user data passed to event */
};

#if defined(__cplusplus)
}
#endif

#include "drm_mode.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP		DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT		DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS		DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL		DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER		DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER		DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

#define DRM_IOCTL_PRIME_HANDLE_TO_FD	DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE	DRM_IOWR(0x2e, struct drm_prime_handle)

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

#define DRM_IOCTL_MODE_CREATE_DUMB	DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB		DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB	DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES	DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE		DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE		DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC		DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB	DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB	DRM_IOWR(0xBE, struct drm_mode_destroy_blob)

#define DRM_IOCTL_SYNCOBJ_CREATE	DRM_IOWR(0xBF, struct drm_syncobj_create)
#define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
#define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)

#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)

#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)

#define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)

/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x9f.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE		0x40
#define DRM_COMMAND_END			0xA0
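/*
 * Illustrative sketch, not part of the original header: a driver places its
 * private ioctls inside the device-specific range by offsetting its command
 * numbers from DRM_COMMAND_BASE.  The driver name "foo", its command number
 * and its argument struct below are hypothetical.
 *
 *	#define DRM_FOO_DO_THING	0x00
 *	#define DRM_IOCTL_FOO_DO_THING \
 *		DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_DO_THING, struct drm_foo_do_thing)
 */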
/**
 * Header for events written back to userspace on the drm fd. The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event. A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
	__u32 type;
	__u32 length;
};

#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
#define DRM_EVENT_CRTC_SEQUENCE	0x03
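/*
 * Illustrative usage sketch, not part of the original header: draining events
 * from the DRM fd.  read() only ever returns whole events, so the buffer can
 * be walked using drm_event::length.  "handle_vblank" is a hypothetical
 * callback; error and short-read handling are omitted.
 *
 *	char buf[1024];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	for (char *p = buf; p < buf + len; ) {
 *		struct drm_event *e = (struct drm_event *)p;
 *		if (e->type == DRM_EVENT_VBLANK)
 *			handle_vblank((struct drm_event_vblank *)e);
 *		p += e->length;
 *	}
 */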
struct drm_event_vblank {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
	__u32 sequence;
	__u32 crtc_id; /* 0 on older kernels that do not support this */
};

/* Event delivered at sequence. Time stamp marks when the first pixel
 * of the refresh cycle leaves the display engine for the display
 */
struct drm_event_crtc_sequence {
	struct drm_event base;
	__u64 user_data;
	__s64 time_ns;
	__u64 sequence;
};

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif

#if defined(__cplusplus)
}
#endif

#endif