/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_ALLOC_BO             1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT   27
#define DRM_VMW_GB_SURFACE_REF_EXT      28
#define DRM_VMW_MSG                     29

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The FIFO is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1
 * SM4_1 support is enabled.
 *
 * DRM_VMW_PARAM_SM5
 * SM5 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13
#define DRM_VMW_PARAM_SM4_1            14
#define DRM_VMW_PARAM_SM5              15

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. (Out)
 * @param: Parameter to query. (In)
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
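
/*
 * Example (illustrative sketch only, not part of this header): querying a
 * device parameter from user-space. Assumes libdrm's drmCommandWriteRead()
 * and an already-open vmwgfx DRM file descriptor "fd"; error handling is
 * omitted for brevity.
 *
 *	#include <xf86drm.h>
 *
 *	struct drm_vmw_getparam_arg gp_arg = {
 *		.param = DRM_VMW_PARAM_3D,	// ask whether 3D is available
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *				&gp_arg, sizeof(gp_arg)) == 0 && gp_arg.value)
 *		; // 3D commands may be submitted on this device
 */
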
/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: Mip level width.
 * @height: Mip level height.
 * @depth: Mip level depth.
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
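
/*
 * Example (illustrative sketch only, not part of this header): creating a
 * legacy surface with one face and two mip levels. Assumes libdrm's
 * drmCommandWriteRead(), <stdint.h> and an open vmwgfx DRM fd "fd"; the
 * format value is a placeholder for a host SVGA3d format code, and error
 * handling is omitted.
 *
 *	struct drm_vmw_size sizes[2] = {
 *		{ .width = 256, .height = 256, .depth = 1 },
 *		{ .width = 128, .height = 128, .depth = 1 },
 *	};
 *	union drm_vmw_surface_create_arg arg = {
 *		.req = {
 *			.format = 2,				// assumed SVGA3D format id
 *			.mip_levels = { 2 },			// face 0 has two mip levels
 *			.size_addr = (__u64)(uintptr_t)sizes,	// one entry per mip level
 *		},
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *				&arg, sizeof(arg)) == 0)
 *		; // arg.rep.sid now holds the surface id
 */
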
/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32 bits.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};
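
/*
 * Example (illustrative sketch only, not part of this header): submitting a
 * command buffer and collecting the returned fence. Assumes libdrm's
 * drmCommandWrite(), an open vmwgfx DRM fd "fd", and a buffer "cmd" of
 * "cmd_len" bytes already filled with valid SVGA commands; error handling
 * is omitted.
 *
 *	struct drm_vmw_fence_rep fence_rep = { 0 };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (__u64)(uintptr_t)cmd,
 *		.command_size = cmd_len,
 *		.fence_rep = (__u64)(uintptr_t)&fence_rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *	};
 *
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *	    fence_rep.error == 0)
 *		; // fence_rep.handle / fence_rep.seqno identify the new fence
 */
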
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
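
/*
 * Example (illustrative sketch only, not part of this header): allocating a
 * buffer object and mapping it into the process. Assumes libdrm's
 * drmCommandWriteRead(), <sys/mman.h>, and an open vmwgfx DRM fd "fd";
 * error handling is omitted.
 *
 *	union drm_vmw_alloc_bo_arg arg = {
 *		.req = { .size = 65536 },	// minimum size in bytes
 *	};
 *	void *ptr;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg, sizeof(arg));
 *
 *	// arg.rep.map_handle is the mmap() offset for this buffer
 *	ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.rep.map_handle);
 */
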
/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};
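
/*
 * Example (illustrative sketch only, not part of this header): enabling an
 * overlay stream that reads from a previously allocated buffer object.
 * Assumes libdrm's drmCommandWrite(), an open vmwgfx DRM fd "fd", a stream id
 * obtained from DRM_VMW_CLAIM_STREAM (defined below) and a buffer handle from
 * DRM_VMW_ALLOC_BO; the format value is a placeholder FOURCC and error
 * handling is omitted.
 *
 *	struct drm_vmw_control_stream_arg arg = {
 *		.stream_id = stream_id,
 *		.enabled = 1,
 *		.handle = bo_handle,
 *		.format = 0x32315659,		// 'YV12' FOURCC, assumed
 *		.width = 640,
 *		.height = 480,
 *		.size = 640 * 480 * 3 / 2,
 *		.pitch = { 640, 320, 320 },
 *		.src = { .x = 0, .y = 0, .w = 640, .h = 480 },
 *		.dst = { .x = 0, .y = 0, .w = 640, .h = 480 },
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
 */
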
/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Maximum number of bytes to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
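
/*
 * Example (illustrative sketch only, not part of this header): reading the 3D
 * capability block. The buffer is sized using DRM_VMW_PARAM_3D_CAPS_SIZE and
 * then filled by DRM_VMW_GET_3D_CAP. Assumes libdrm, <stdlib.h> and an open
 * vmwgfx DRM fd "fd"; error handling is omitted.
 *
 *	struct drm_vmw_getparam_arg gp_arg = {
 *		.param = DRM_VMW_PARAM_3D_CAPS_SIZE,
 *	};
 *	struct drm_vmw_get_3d_cap_arg cap_arg = { 0 };
 *	void *caps;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg));
 *	caps = calloc(1, gp_arg.value);
 *	cap_arg.buffer = (__u64)(uintptr_t)caps;
 *	cap_arg.max_size = gp_arg.value;
 *	drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap_arg, sizeof(cap_arg));
 */
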
/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest sequence passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};
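
/*
 * Example (illustrative sketch only, not part of this header): waiting for a
 * fence returned by DRM_VMW_EXECBUF and then polling it. Assumes libdrm's
 * drmCommandWriteRead(), an open vmwgfx DRM fd "fd" and a fence handle
 * "fence_rep.handle" from a previous submission; error handling is omitted.
 *
 *	struct drm_vmw_fence_wait_arg wait_arg = {
 *		.handle = fence_rep.handle,
 *		.cookie_valid = 0,		// first call
 *		.timeout_us = 1000000,		// give up after one second
 *		.lazy = 1,			// timing is not critical
 *		.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *	};
 *	struct drm_vmw_fence_signaled_arg sig_arg = {
 *		.handle = fence_rep.handle,
 *		.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &wait_arg, sizeof(wait_arg));
 *	drmCommandWriteRead(fd, DRM_VMW_FENCE_SIGNALED, &sig_arg, sizeof(sig_arg));
 *	// sig_arg.signaled is non-zero once the EXEC flag has signaled
 */
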
/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};
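
/*
 * Example (illustrative sketch only, not part of this header): asking for an
 * event when a fence signals, then reading that event from the DRM fd.
 * Assumes libdrm, <unistd.h>, an open vmwgfx DRM fd "fd" and an existing
 * fence handle "fence_handle"; error handling and partial reads are ignored.
 *
 *	struct drm_vmw_fence_event_arg ev_arg = {
 *		.handle = fence_handle,
 *		.user_data = 0x1234,			// returned in the event
 *		.flags = DRM_VMW_FE_FLAG_REQ_TIME,	// also report signal time
 *	};
 *	char buf[128];
 *	struct drm_vmw_event_fence *ev;
 *
 *	drmCommandWrite(fd, DRM_VMW_FENCE_EVENT, &ev_arg, sizeof(ev_arg));
 *
 *	// Later, e.g. after poll() reports the fd readable:
 *	read(fd, buf, sizeof(buf));
 *	ev = (struct drm_vmw_event_fence *)buf;
 *	if (ev->base.type == DRM_VMW_EVENT_FENCE_SIGNALED)
 *		; // ev->user_data == 0x1234, ev->tv_sec/tv_usec give the time
 */
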
/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is 0, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
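
/*
 * Example (illustrative sketch only, not part of this header): describing a
 * side-by-side dual-monitor layout to the driver. Assumes libdrm's
 * drmCommandWrite() and an open vmwgfx DRM fd "fd"; error handling is omitted.
 *
 *	struct drm_vmw_rect layout[2] = {
 *		{ .x = 0,    .y = 0, .w = 1920, .h = 1080 },
 *		{ .x = 1920, .y = 0, .w = 1280, .h = 1024 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (__u64)(uintptr_t)layout,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */
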
/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};
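
/*
 * Example (illustrative sketch only, not part of this header): creating a
 * pixel shader from byte-code already copied into a buffer object. Assumes
 * libdrm's drmCommandWriteRead(), an open vmwgfx DRM fd "fd", a buffer handle
 * "bo_handle" from DRM_VMW_ALLOC_BO and the byte-code length "code_len";
 * error handling is omitted.
 *
 *	struct drm_vmw_shader_create_arg arg = {
 *		.shader_type = drm_vmw_shader_type_ps,
 *		.size = code_len,
 *		.buffer_handle = bo_handle,
 *		.offset = 0,		// byte-code starts at buffer offset 0
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER, &arg, sizeof(arg)) == 0)
 *		; // arg.shader_handle identifies the new shader
 */
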
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2),
	drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
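
/*
 * Example (illustrative sketch only, not part of this header): creating a
 * guest-backed surface and letting the kernel allocate the backup buffer.
 * Assumes libdrm's drmCommandWriteRead() and an open vmwgfx DRM fd "fd"; the
 * svga3d_flags and format values are placeholders for host-defined codes, and
 * SVGA3D_INVALID_ID is assumed to be ~0U as in the SVGA device headers.
 *
 *	union drm_vmw_gb_surface_create_arg arg = {
 *		.req = {
 *			.svga3d_flags = 0,		// assumed host surface flags
 *			.format = 2,			// assumed SVGA3D format id
 *			.mip_levels = 1,
 *			.drm_surface_flags = drm_vmw_surface_flag_create_buffer,
 *			.buffer_handle = ~0U,		// SVGA3D_INVALID_ID: no buffer yet
 *			.base_size = { .width = 1024, .height = 768, .depth = 1 },
 *		},
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0)
 *		; // arg.rep.handle and arg.rep.buffer_handle are now valid
 */
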
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
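
/*
 * Example (illustrative sketch only, not part of this header): grabbing a
 * buffer object for CPU writes, touching its mapping, then releasing it.
 * Assumes libdrm's drmCommandWrite(), an open vmwgfx DRM fd "fd" and a buffer
 * handle "bo_handle" that is already mmap()ed at "ptr"; error handling is
 * omitted.
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.flags = drm_vmw_synccpu_write,
 *		.handle = bo_handle,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	((unsigned char *)ptr)[0] = 0xff;	// CPU access while synced
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */
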
/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and a 64 bit svga flag.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1,
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
	struct drm_vmw_gb_surface_create_req base;
	enum drm_vmw_surface_version version;
	__u32 svga3d_flags_upper_32_bits;
	__u32 multisample_pattern;
	__u32 quality_level;
	__u32 buffer_byte_stride;
	__u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_ext_req req;
};
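
/*
 * Example (illustrative sketch only, not part of this header): the extended
 * create request reuses the legacy request as @base and carries the upper
 * half of the 64-bit svga3d flags separately. Assumes libdrm's
 * drmCommandWriteRead() and an open vmwgfx DRM fd "fd"; flag and format
 * values are placeholders and error handling is omitted.
 *
 *	__u64 svga3d_flags = 0;		// assumed 64-bit host surface flags
 *	union drm_vmw_gb_surface_create_ext_arg arg = {
 *		.req = {
 *			.base = {
 *				.svga3d_flags = (__u32)svga3d_flags,	// lower 32 bits
 *				.format = 2,				// assumed format id
 *				.mip_levels = 1,
 *				.drm_surface_flags = drm_vmw_surface_flag_create_buffer,
 *				.buffer_handle = ~0U,			// no backup buffer yet
 *				.base_size = { .width = 640, .height = 480, .depth = 1 },
 *			},
 *			.version = drm_vmw_gb_surface_v1,
 *			.svga3d_flags_upper_32_bits = (__u32)(svga3d_flags >> 32),
 *		},
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE_EXT, &arg, sizeof(arg));
 */
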
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;
	struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
	__u64 send;
	__u64 receive;
	__s32 send_only;
	__u32 receive_len;
};

#if defined(__cplusplus)
}
#endif

#endif