// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))

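/*
 * Illustrative note (not referenced by the code below): surface flags are
 * stored internally as a 64-bit value but may cross the UAPI boundary as two
 * 32-bit halves, which the helpers above split and reassemble, e.g.
 *
 *   SVGA3dSurfaceAllFlags flags = SVGA3D_FLAGS_64(upper32, lower32);
 *   upper32 == SVGA3D_FLAGS_UPPER_32(flags);
 *   lower32 == SVGA3D_FLAGS_LOWER_32(flags);
 */
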
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object providing user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security check.
 * @backup_base:    The TTM base object of the backup buffer.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @size: Accounting size for the struct vmw_surface_dirty.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
	struct svga3dsurface_cache cache;
	size_t size;
	u32 num_subres;
	SVGA3dBox boxes[];
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);

static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};
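
/*
 * Roughly speaking, legacy surfaces are created in device memory and have
 * their contents transferred to or from a guest backup buffer with
 * SURFACE_DMA commands on bind/unbind, while guest-backed surfaces keep
 * their backing store in a MOB at all times; hence the differing
 * needs_backup and backup_placement values above.
 */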

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags, was upcasted when received from user-space,
	 * since driver internally stores as 64 bit.
	 * For legacy surface define only 32 bit flag is supported.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
	cmd->body.format = srf->metadata.format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];

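	/*
	 * The per-mip SVGA3dSize array is laid out directly after the
	 * fixed-size define command in the reserved FIFO space; stepping the
	 * command pointer past the struct yields the start of that array.
	 */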
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->metadata.sizes;

	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->metadata.format);

	for (i = 0; i < srf->metadata.num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
};


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * used_memory_size_atomic, or separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * Creates the device surface if it doesn't yet have a hw id.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	WARN_ON_ONCE(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->metadata.sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	if (user_srf->backup_base)
		ttm_base_object_unref(&user_srf->backup_base);
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_surface_metadata *metadata;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

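	/*
	 * Accounting footprint: the user surface object itself plus the
	 * per-mip-level size and offset arrays allocated further down,
	 * charged against the TTM global memory accounting.
	 */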
	size = vmw_user_surface_size +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	metadata = &srf->metadata;
	res = &srf->res;

	/* Driver internally stores as 64-bit flags */
	metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
	metadata->format = req->format;
	metadata->scanout = req->scanout;

	memcpy(metadata->mip_levels, req->mip_levels,
	       sizeof(metadata->mip_levels));
	metadata->num_sizes = num_sizes;
	user_srf->size = size;
	metadata->sizes =
		memdup_user((struct drm_vmw_size __user *)(unsigned long)
			    req->size_addr,
			    sizeof(*metadata->sizes) * metadata->num_sizes);
	if (IS_ERR(metadata->sizes)) {
		ret = PTR_ERR(metadata->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	metadata->base_size = *srf->metadata.sizes;
	metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	metadata->multisample_count = 0;
	metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	metadata->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = metadata->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < metadata->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (metadata->scanout &&
	    metadata->num_sizes == 1 &&
	    metadata->sizes[0].width == 64 &&
	    metadata->sizes[0].height == 64 &&
	    metadata->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_bo_alloc(dev_priv, tfile,
					res->backup_size,
					true,
					&backup_handle,
					&res->backup,
					&user_srf->backup_base);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(metadata->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		bool require_exist = false;

		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/* Error out if we are unauthenticated primary */
		if (drm_is_primary_client(file_priv) &&
		    !file_priv->authenticated) {
			ret = -EACCES;
			goto out_bad_resource;
		}

		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
					 require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->metadata.flags;
	rep->format = srf->metadata.format;
	memcpy(rep->mip_levels, srf->metadata.mip_levels,
	       sizeof(srf->metadata.mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
				   sizeof(srf->metadata.base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->metadata.num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Create a guest backed device surface as part of
 * the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
vmw_gb_surface_create(struct vmw_resource * res)1066*4882a593Smuzhiyun static int vmw_gb_surface_create(struct vmw_resource *res)
1067*4882a593Smuzhiyun {
1068*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
1069*4882a593Smuzhiyun 	struct vmw_surface *srf = vmw_res_to_srf(res);
1070*4882a593Smuzhiyun 	struct vmw_surface_metadata *metadata = &srf->metadata;
1071*4882a593Smuzhiyun 	uint32_t cmd_len, cmd_id, submit_len;
1072*4882a593Smuzhiyun 	int ret;
1073*4882a593Smuzhiyun 	struct {
1074*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1075*4882a593Smuzhiyun 		SVGA3dCmdDefineGBSurface body;
1076*4882a593Smuzhiyun 	} *cmd;
1077*4882a593Smuzhiyun 	struct {
1078*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1079*4882a593Smuzhiyun 		SVGA3dCmdDefineGBSurface_v2 body;
1080*4882a593Smuzhiyun 	} *cmd2;
1081*4882a593Smuzhiyun 	struct {
1082*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1083*4882a593Smuzhiyun 		SVGA3dCmdDefineGBSurface_v3 body;
1084*4882a593Smuzhiyun 	} *cmd3;
1085*4882a593Smuzhiyun 	struct {
1086*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1087*4882a593Smuzhiyun 		SVGA3dCmdDefineGBSurface_v4 body;
1088*4882a593Smuzhiyun 	} *cmd4;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	if (likely(res->id != -1))
1091*4882a593Smuzhiyun 		return 0;
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	vmw_fifo_resource_inc(dev_priv);
1094*4882a593Smuzhiyun 	ret = vmw_resource_alloc_id(res);
1095*4882a593Smuzhiyun 	if (unlikely(ret != 0)) {
1096*4882a593Smuzhiyun 		DRM_ERROR("Failed to allocate a surface id.\n");
1097*4882a593Smuzhiyun 		goto out_no_id;
1098*4882a593Smuzhiyun 	}
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
1101*4882a593Smuzhiyun 		ret = -EBUSY;
1102*4882a593Smuzhiyun 		goto out_no_fifo;
1103*4882a593Smuzhiyun 	}
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
1106*4882a593Smuzhiyun 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
1107*4882a593Smuzhiyun 		cmd_len = sizeof(cmd4->body);
1108*4882a593Smuzhiyun 		submit_len = sizeof(*cmd4);
1109*4882a593Smuzhiyun 	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
1110*4882a593Smuzhiyun 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
1111*4882a593Smuzhiyun 		cmd_len = sizeof(cmd3->body);
1112*4882a593Smuzhiyun 		submit_len = sizeof(*cmd3);
1113*4882a593Smuzhiyun 	} else if (metadata->array_size > 0) {
1114*4882a593Smuzhiyun 		/* VMW_SM_4 support verified at creation time. */
1115*4882a593Smuzhiyun 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
1116*4882a593Smuzhiyun 		cmd_len = sizeof(cmd2->body);
1117*4882a593Smuzhiyun 		submit_len = sizeof(*cmd2);
1118*4882a593Smuzhiyun 	} else {
1119*4882a593Smuzhiyun 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
1120*4882a593Smuzhiyun 		cmd_len = sizeof(cmd->body);
1121*4882a593Smuzhiyun 		submit_len = sizeof(*cmd);
1122*4882a593Smuzhiyun 	}
1123*4882a593Smuzhiyun 
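	/*
	 * A single FIFO reservation of submit_len bytes backs whichever
	 * define variant was selected above; cmd2, cmd3 and cmd4 below are
	 * simply differently typed views of the same reservation.
	 */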
1124*4882a593Smuzhiyun 	cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
1125*4882a593Smuzhiyun 	cmd2 = (typeof(cmd2))cmd;
1126*4882a593Smuzhiyun 	cmd3 = (typeof(cmd3))cmd;
1127*4882a593Smuzhiyun 	cmd4 = (typeof(cmd4))cmd;
1128*4882a593Smuzhiyun 	if (unlikely(!cmd)) {
1129*4882a593Smuzhiyun 		ret = -ENOMEM;
1130*4882a593Smuzhiyun 		goto out_no_fifo;
1131*4882a593Smuzhiyun 	}
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
1134*4882a593Smuzhiyun 		cmd4->header.id = cmd_id;
1135*4882a593Smuzhiyun 		cmd4->header.size = cmd_len;
1136*4882a593Smuzhiyun 		cmd4->body.sid = srf->res.id;
1137*4882a593Smuzhiyun 		cmd4->body.surfaceFlags = metadata->flags;
1138*4882a593Smuzhiyun 		cmd4->body.format = metadata->format;
1139*4882a593Smuzhiyun 		cmd4->body.numMipLevels = metadata->mip_levels[0];
1140*4882a593Smuzhiyun 		cmd4->body.multisampleCount = metadata->multisample_count;
1141*4882a593Smuzhiyun 		cmd4->body.multisamplePattern = metadata->multisample_pattern;
1142*4882a593Smuzhiyun 		cmd4->body.qualityLevel = metadata->quality_level;
1143*4882a593Smuzhiyun 		cmd4->body.autogenFilter = metadata->autogen_filter;
1144*4882a593Smuzhiyun 		cmd4->body.size.width = metadata->base_size.width;
1145*4882a593Smuzhiyun 		cmd4->body.size.height = metadata->base_size.height;
1146*4882a593Smuzhiyun 		cmd4->body.size.depth = metadata->base_size.depth;
1147*4882a593Smuzhiyun 		cmd4->body.arraySize = metadata->array_size;
1148*4882a593Smuzhiyun 		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
1149*4882a593Smuzhiyun 	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
1150*4882a593Smuzhiyun 		cmd3->header.id = cmd_id;
1151*4882a593Smuzhiyun 		cmd3->header.size = cmd_len;
1152*4882a593Smuzhiyun 		cmd3->body.sid = srf->res.id;
1153*4882a593Smuzhiyun 		cmd3->body.surfaceFlags = metadata->flags;
1154*4882a593Smuzhiyun 		cmd3->body.format = metadata->format;
1155*4882a593Smuzhiyun 		cmd3->body.numMipLevels = metadata->mip_levels[0];
1156*4882a593Smuzhiyun 		cmd3->body.multisampleCount = metadata->multisample_count;
1157*4882a593Smuzhiyun 		cmd3->body.multisamplePattern = metadata->multisample_pattern;
1158*4882a593Smuzhiyun 		cmd3->body.qualityLevel = metadata->quality_level;
1159*4882a593Smuzhiyun 		cmd3->body.autogenFilter = metadata->autogen_filter;
1160*4882a593Smuzhiyun 		cmd3->body.size.width = metadata->base_size.width;
1161*4882a593Smuzhiyun 		cmd3->body.size.height = metadata->base_size.height;
1162*4882a593Smuzhiyun 		cmd3->body.size.depth = metadata->base_size.depth;
1163*4882a593Smuzhiyun 		cmd3->body.arraySize = metadata->array_size;
1164*4882a593Smuzhiyun 	} else if (metadata->array_size > 0) {
1165*4882a593Smuzhiyun 		cmd2->header.id = cmd_id;
1166*4882a593Smuzhiyun 		cmd2->header.size = cmd_len;
1167*4882a593Smuzhiyun 		cmd2->body.sid = srf->res.id;
1168*4882a593Smuzhiyun 		cmd2->body.surfaceFlags = metadata->flags;
1169*4882a593Smuzhiyun 		cmd2->body.format = metadata->format;
1170*4882a593Smuzhiyun 		cmd2->body.numMipLevels = metadata->mip_levels[0];
1171*4882a593Smuzhiyun 		cmd2->body.multisampleCount = metadata->multisample_count;
1172*4882a593Smuzhiyun 		cmd2->body.autogenFilter = metadata->autogen_filter;
1173*4882a593Smuzhiyun 		cmd2->body.size.width = metadata->base_size.width;
1174*4882a593Smuzhiyun 		cmd2->body.size.height = metadata->base_size.height;
1175*4882a593Smuzhiyun 		cmd2->body.size.depth = metadata->base_size.depth;
1176*4882a593Smuzhiyun 		cmd2->body.arraySize = metadata->array_size;
1177*4882a593Smuzhiyun 	} else {
1178*4882a593Smuzhiyun 		cmd->header.id = cmd_id;
1179*4882a593Smuzhiyun 		cmd->header.size = cmd_len;
1180*4882a593Smuzhiyun 		cmd->body.sid = srf->res.id;
1181*4882a593Smuzhiyun 		cmd->body.surfaceFlags = metadata->flags;
1182*4882a593Smuzhiyun 		cmd->body.format = metadata->format;
1183*4882a593Smuzhiyun 		cmd->body.numMipLevels = metadata->mip_levels[0];
1184*4882a593Smuzhiyun 		cmd->body.multisampleCount = metadata->multisample_count;
1185*4882a593Smuzhiyun 		cmd->body.autogenFilter = metadata->autogen_filter;
1186*4882a593Smuzhiyun 		cmd->body.size.width = metadata->base_size.width;
1187*4882a593Smuzhiyun 		cmd->body.size.height = metadata->base_size.height;
1188*4882a593Smuzhiyun 		cmd->body.size.depth = metadata->base_size.depth;
1189*4882a593Smuzhiyun 	}
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, submit_len);
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	return 0;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun out_no_fifo:
1196*4882a593Smuzhiyun 	vmw_resource_release_id(res);
1197*4882a593Smuzhiyun out_no_id:
1198*4882a593Smuzhiyun 	vmw_fifo_resource_dec(dev_priv);
1199*4882a593Smuzhiyun 	return ret;
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun static int vmw_gb_surface_bind(struct vmw_resource *res,
1204*4882a593Smuzhiyun 			       struct ttm_validate_buffer *val_buf)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
1207*4882a593Smuzhiyun 	struct {
1208*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1209*4882a593Smuzhiyun 		SVGA3dCmdBindGBSurface body;
1210*4882a593Smuzhiyun 	} *cmd1;
1211*4882a593Smuzhiyun 	struct {
1212*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1213*4882a593Smuzhiyun 		SVGA3dCmdUpdateGBSurface body;
1214*4882a593Smuzhiyun 	} *cmd2;
1215*4882a593Smuzhiyun 	uint32_t submit_size;
1216*4882a593Smuzhiyun 	struct ttm_buffer_object *bo = val_buf->bo;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
1219*4882a593Smuzhiyun 
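	/*
	 * Reserve space for the bind command, plus a trailing
	 * UPDATE_GB_SURFACE when dirty backup contents still need to be
	 * uploaded to the device.
	 */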
1220*4882a593Smuzhiyun 	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
1223*4882a593Smuzhiyun 	if (unlikely(!cmd1))
1224*4882a593Smuzhiyun 		return -ENOMEM;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
1227*4882a593Smuzhiyun 	cmd1->header.size = sizeof(cmd1->body);
1228*4882a593Smuzhiyun 	cmd1->body.sid = res->id;
1229*4882a593Smuzhiyun 	cmd1->body.mobid = bo->mem.start;
1230*4882a593Smuzhiyun 	if (res->backup_dirty) {
1231*4882a593Smuzhiyun 		cmd2 = (void *) &cmd1[1];
1232*4882a593Smuzhiyun 		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
1233*4882a593Smuzhiyun 		cmd2->header.size = sizeof(cmd2->body);
1234*4882a593Smuzhiyun 		cmd2->body.sid = res->id;
1235*4882a593Smuzhiyun 	}
1236*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, submit_size);
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	if (res->backup->dirty && res->backup_dirty) {
1239*4882a593Smuzhiyun 		/* We've just made a full upload. Clear dirty regions. */
1240*4882a593Smuzhiyun 		vmw_bo_dirty_clear_res(res);
1241*4882a593Smuzhiyun 	}
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	res->backup_dirty = false;
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	return 0;
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun static int vmw_gb_surface_unbind(struct vmw_resource *res,
1249*4882a593Smuzhiyun 				 bool readback,
1250*4882a593Smuzhiyun 				 struct ttm_validate_buffer *val_buf)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
1253*4882a593Smuzhiyun 	struct ttm_buffer_object *bo = val_buf->bo;
1254*4882a593Smuzhiyun 	struct vmw_fence_obj *fence;
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	struct {
1257*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1258*4882a593Smuzhiyun 		SVGA3dCmdReadbackGBSurface body;
1259*4882a593Smuzhiyun 	} *cmd1;
1260*4882a593Smuzhiyun 	struct {
1261*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1262*4882a593Smuzhiyun 		SVGA3dCmdInvalidateGBSurface body;
1263*4882a593Smuzhiyun 	} *cmd2;
1264*4882a593Smuzhiyun 	struct {
1265*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1266*4882a593Smuzhiyun 		SVGA3dCmdBindGBSurface body;
1267*4882a593Smuzhiyun 	} *cmd3;
1268*4882a593Smuzhiyun 	uint32_t submit_size;
1269*4882a593Smuzhiyun 	uint8_t *cmd;
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
1273*4882a593Smuzhiyun 
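	/*
	 * Either read the surface contents back into the MOB (readback) or
	 * just invalidate the device copy, then detach the MOB by binding
	 * SVGA3D_INVALID_ID.
	 */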
1274*4882a593Smuzhiyun 	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
1275*4882a593Smuzhiyun 	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
1276*4882a593Smuzhiyun 	if (unlikely(!cmd))
1277*4882a593Smuzhiyun 		return -ENOMEM;
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	if (readback) {
1280*4882a593Smuzhiyun 		cmd1 = (void *) cmd;
1281*4882a593Smuzhiyun 		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
1282*4882a593Smuzhiyun 		cmd1->header.size = sizeof(cmd1->body);
1283*4882a593Smuzhiyun 		cmd1->body.sid = res->id;
1284*4882a593Smuzhiyun 		cmd3 = (void *) &cmd1[1];
1285*4882a593Smuzhiyun 	} else {
1286*4882a593Smuzhiyun 		cmd2 = (void *) cmd;
1287*4882a593Smuzhiyun 		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
1288*4882a593Smuzhiyun 		cmd2->header.size = sizeof(cmd2->body);
1289*4882a593Smuzhiyun 		cmd2->body.sid = res->id;
1290*4882a593Smuzhiyun 		cmd3 = (void *) &cmd2[1];
1291*4882a593Smuzhiyun 	}
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
1294*4882a593Smuzhiyun 	cmd3->header.size = sizeof(cmd3->body);
1295*4882a593Smuzhiyun 	cmd3->body.sid = res->id;
1296*4882a593Smuzhiyun 	cmd3->body.mobid = SVGA3D_INVALID_ID;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, submit_size);
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	/*
1301*4882a593Smuzhiyun 	 * Create a fence object and fence the backup buffer.
1302*4882a593Smuzhiyun 	 */
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
1305*4882a593Smuzhiyun 					  &fence, NULL);
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	vmw_bo_fence_single(val_buf->bo, fence);
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	if (likely(fence != NULL))
1310*4882a593Smuzhiyun 		vmw_fence_obj_unreference(&fence);
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	return 0;
1313*4882a593Smuzhiyun }
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun static int vmw_gb_surface_destroy(struct vmw_resource *res)
1316*4882a593Smuzhiyun {
1317*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
1318*4882a593Smuzhiyun 	struct vmw_surface *srf = vmw_res_to_srf(res);
1319*4882a593Smuzhiyun 	struct {
1320*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1321*4882a593Smuzhiyun 		SVGA3dCmdDestroyGBSurface body;
1322*4882a593Smuzhiyun 	} *cmd;
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	if (likely(res->id == -1))
1325*4882a593Smuzhiyun 		return 0;
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	mutex_lock(&dev_priv->binding_mutex);
1328*4882a593Smuzhiyun 	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
1329*4882a593Smuzhiyun 	vmw_binding_res_list_scrub(&res->binding_head);
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
1332*4882a593Smuzhiyun 	if (unlikely(!cmd)) {
1333*4882a593Smuzhiyun 		mutex_unlock(&dev_priv->binding_mutex);
1334*4882a593Smuzhiyun 		return -ENOMEM;
1335*4882a593Smuzhiyun 	}
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
1338*4882a593Smuzhiyun 	cmd->header.size = sizeof(cmd->body);
1339*4882a593Smuzhiyun 	cmd->body.sid = res->id;
1340*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1341*4882a593Smuzhiyun 	mutex_unlock(&dev_priv->binding_mutex);
1342*4882a593Smuzhiyun 	vmw_resource_release_id(res);
1343*4882a593Smuzhiyun 	vmw_fifo_resource_dec(dev_priv);
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 	return 0;
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun /**
1349*4882a593Smuzhiyun  * vmw_gb_surface_define_ioctl - Ioctl function implementing
1350*4882a593Smuzhiyun  * the user surface define functionality.
1351*4882a593Smuzhiyun  *
1352*4882a593Smuzhiyun  * @dev: Pointer to a struct drm_device.
1353*4882a593Smuzhiyun  * @data: Pointer to data copied from / to user-space.
1354*4882a593Smuzhiyun  * @file_priv: Pointer to a drm file private structure.
1355*4882a593Smuzhiyun  */
1356*4882a593Smuzhiyun int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1357*4882a593Smuzhiyun 				struct drm_file *file_priv)
1358*4882a593Smuzhiyun {
1359*4882a593Smuzhiyun 	union drm_vmw_gb_surface_create_arg *arg =
1360*4882a593Smuzhiyun 	    (union drm_vmw_gb_surface_create_arg *)data;
1361*4882a593Smuzhiyun 	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
1362*4882a593Smuzhiyun 	struct drm_vmw_gb_surface_create_ext_req req_ext;
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	req_ext.base = arg->req;
1365*4882a593Smuzhiyun 	req_ext.version = drm_vmw_gb_surface_v1;
1366*4882a593Smuzhiyun 	req_ext.svga3d_flags_upper_32_bits = 0;
1367*4882a593Smuzhiyun 	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
1368*4882a593Smuzhiyun 	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
1369*4882a593Smuzhiyun 	req_ext.buffer_byte_stride = 0;
1370*4882a593Smuzhiyun 	req_ext.must_be_zero = 0;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
1373*4882a593Smuzhiyun }
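/*
 * A minimal user-space sketch of driving this ioctl. The libdrm helper
 * drmCommandWriteRead() and the field values below are illustrative
 * assumptions, not taken from this file:
 *
 *	union drm_vmw_gb_surface_create_arg arg = { 0 };
 *
 *	arg.req.format = SVGA3D_R8G8B8A8_UNORM;
 *	arg.req.mip_levels = 1;
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *
 *	ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				  &arg, sizeof(arg));
 */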
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun /**
1376*4882a593Smuzhiyun  * vmw_gb_surface_reference_ioctl - Ioctl function implementing
1377*4882a593Smuzhiyun  * the user surface reference functionality.
1378*4882a593Smuzhiyun  *
1379*4882a593Smuzhiyun  * @dev: Pointer to a struct drm_device.
1380*4882a593Smuzhiyun  * @data: Pointer to data copied from / to user-space.
1381*4882a593Smuzhiyun  * @file_priv: Pointer to a drm file private structure.
1382*4882a593Smuzhiyun  */
1383*4882a593Smuzhiyun int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1384*4882a593Smuzhiyun 				   struct drm_file *file_priv)
1385*4882a593Smuzhiyun {
1386*4882a593Smuzhiyun 	union drm_vmw_gb_surface_reference_arg *arg =
1387*4882a593Smuzhiyun 	    (union drm_vmw_gb_surface_reference_arg *)data;
1388*4882a593Smuzhiyun 	struct drm_vmw_surface_arg *req = &arg->req;
1389*4882a593Smuzhiyun 	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
1390*4882a593Smuzhiyun 	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
1391*4882a593Smuzhiyun 	int ret;
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1396*4882a593Smuzhiyun 		return ret;
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	rep->creq = rep_ext.creq.base;
1399*4882a593Smuzhiyun 	rep->crep = rep_ext.crep;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	return ret;
1402*4882a593Smuzhiyun }
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun /**
1405*4882a593Smuzhiyun  * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
1406*4882a593Smuzhiyun  * the user surface define functionality.
1407*4882a593Smuzhiyun  *
1408*4882a593Smuzhiyun  * @dev: Pointer to a struct drm_device.
1409*4882a593Smuzhiyun  * @data: Pointer to data copied from / to user-space.
1410*4882a593Smuzhiyun  * @file_priv: Pointer to a drm file private structure.
1411*4882a593Smuzhiyun  */
1412*4882a593Smuzhiyun int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
1413*4882a593Smuzhiyun 				struct drm_file *file_priv)
1414*4882a593Smuzhiyun {
1415*4882a593Smuzhiyun 	union drm_vmw_gb_surface_create_ext_arg *arg =
1416*4882a593Smuzhiyun 	    (union drm_vmw_gb_surface_create_ext_arg *)data;
1417*4882a593Smuzhiyun 	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
1418*4882a593Smuzhiyun 	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
1421*4882a593Smuzhiyun }
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun /**
1424*4882a593Smuzhiyun  * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
1425*4882a593Smuzhiyun  * the user surface reference functionality.
1426*4882a593Smuzhiyun  *
1427*4882a593Smuzhiyun  * @dev: Pointer to a struct drm_device.
1428*4882a593Smuzhiyun  * @data: Pointer to data copied from / to user-space.
1429*4882a593Smuzhiyun  * @file_priv: Pointer to a drm file private structure.
1430*4882a593Smuzhiyun  */
1431*4882a593Smuzhiyun int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
1432*4882a593Smuzhiyun 				   struct drm_file *file_priv)
1433*4882a593Smuzhiyun {
1434*4882a593Smuzhiyun 	union drm_vmw_gb_surface_reference_ext_arg *arg =
1435*4882a593Smuzhiyun 	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
1436*4882a593Smuzhiyun 	struct drm_vmw_surface_arg *req = &arg->req;
1437*4882a593Smuzhiyun 	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
1440*4882a593Smuzhiyun }
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun /**
1443*4882a593Smuzhiyun  * vmw_gb_surface_define_internal - Internal worker implementing
1444*4882a593Smuzhiyun  * the user surface define functionality.
1445*4882a593Smuzhiyun  *
1446*4882a593Smuzhiyun  * @dev: Pointer to a struct drm_device.
1447*4882a593Smuzhiyun  * @req: Request argument from user-space.
1448*4882a593Smuzhiyun  * @rep: Response argument to user-space.
1449*4882a593Smuzhiyun  * @file_priv: Pointer to a drm file private structure.
1450*4882a593Smuzhiyun  */
1451*4882a593Smuzhiyun static int
1452*4882a593Smuzhiyun vmw_gb_surface_define_internal(struct drm_device *dev,
1453*4882a593Smuzhiyun 			       struct drm_vmw_gb_surface_create_ext_req *req,
1454*4882a593Smuzhiyun 			       struct drm_vmw_gb_surface_create_rep *rep,
1455*4882a593Smuzhiyun 			       struct drm_file *file_priv)
1456*4882a593Smuzhiyun {
1457*4882a593Smuzhiyun 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1458*4882a593Smuzhiyun 	struct vmw_private *dev_priv = vmw_priv(dev);
1459*4882a593Smuzhiyun 	struct vmw_user_surface *user_srf;
1460*4882a593Smuzhiyun 	struct vmw_surface_metadata metadata = {0};
1461*4882a593Smuzhiyun 	struct vmw_surface *srf;
1462*4882a593Smuzhiyun 	struct vmw_resource *res;
1463*4882a593Smuzhiyun 	struct vmw_resource *tmp;
1464*4882a593Smuzhiyun 	int ret = 0;
1465*4882a593Smuzhiyun 	uint32_t size;
1466*4882a593Smuzhiyun 	uint32_t backup_handle = 0;
1467*4882a593Smuzhiyun 	SVGA3dSurfaceAllFlags svga3d_flags_64 =
1468*4882a593Smuzhiyun 		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
1469*4882a593Smuzhiyun 				req->base.svga3d_flags);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	/* array_size must be zero for non-GL3 hosts. */
1472*4882a593Smuzhiyun 	if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
1473*4882a593Smuzhiyun 		VMW_DEBUG_USER("SM4 surface not supported.\n");
1474*4882a593Smuzhiyun 		return -EINVAL;
1475*4882a593Smuzhiyun 	}
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	if (!has_sm4_1_context(dev_priv)) {
1478*4882a593Smuzhiyun 		if (req->svga3d_flags_upper_32_bits != 0)
1479*4882a593Smuzhiyun 			ret = -EINVAL;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 		if (req->base.multisample_count != 0)
1482*4882a593Smuzhiyun 			ret = -EINVAL;
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
1485*4882a593Smuzhiyun 			ret = -EINVAL;
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
1488*4882a593Smuzhiyun 			ret = -EINVAL;
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 		if (ret) {
1491*4882a593Smuzhiyun 			VMW_DEBUG_USER("SM4.1 surface not supported.\n");
1492*4882a593Smuzhiyun 			return ret;
1493*4882a593Smuzhiyun 		}
1494*4882a593Smuzhiyun 	}
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
1497*4882a593Smuzhiyun 		VMW_DEBUG_USER("SM5 surface not supported.\n");
1498*4882a593Smuzhiyun 		return -EINVAL;
1499*4882a593Smuzhiyun 	}
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
1502*4882a593Smuzhiyun 	    req->base.multisample_count == 0) {
1503*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid sample count.\n");
1504*4882a593Smuzhiyun 		return -EINVAL;
1505*4882a593Smuzhiyun 	}
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
1508*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid mip level.\n");
1509*4882a593Smuzhiyun 		return -EINVAL;
1510*4882a593Smuzhiyun 	}
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	if (unlikely(vmw_user_surface_size == 0))
1513*4882a593Smuzhiyun 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1514*4882a593Smuzhiyun 			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	size = vmw_user_surface_size;
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	metadata.flags = svga3d_flags_64;
1519*4882a593Smuzhiyun 	metadata.format = req->base.format;
1520*4882a593Smuzhiyun 	metadata.mip_levels[0] = req->base.mip_levels;
1521*4882a593Smuzhiyun 	metadata.multisample_count = req->base.multisample_count;
1522*4882a593Smuzhiyun 	metadata.multisample_pattern = req->multisample_pattern;
1523*4882a593Smuzhiyun 	metadata.quality_level = req->quality_level;
1524*4882a593Smuzhiyun 	metadata.array_size = req->base.array_size;
1525*4882a593Smuzhiyun 	metadata.buffer_byte_stride = req->buffer_byte_stride;
1526*4882a593Smuzhiyun 	metadata.num_sizes = 1;
1527*4882a593Smuzhiyun 	metadata.base_size = req->base.base_size;
1528*4882a593Smuzhiyun 	metadata.scanout = req->base.drm_surface_flags &
1529*4882a593Smuzhiyun 		drm_vmw_surface_flag_scanout;
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	/* Define a surface based on the parameters. */
1532*4882a593Smuzhiyun 	ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
1533*4882a593Smuzhiyun 	if (ret != 0) {
1534*4882a593Smuzhiyun 		VMW_DEBUG_USER("Failed to define surface.\n");
1535*4882a593Smuzhiyun 		return ret;
1536*4882a593Smuzhiyun 	}
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	user_srf = container_of(srf, struct vmw_user_surface, srf);
1539*4882a593Smuzhiyun 	if (drm_is_primary_client(file_priv))
1540*4882a593Smuzhiyun 		user_srf->master = drm_master_get(file_priv->master);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1543*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1544*4882a593Smuzhiyun 		return ret;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	res = &user_srf->srf.res;
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
1549*4882a593Smuzhiyun 		ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
1550*4882a593Smuzhiyun 					 &res->backup,
1551*4882a593Smuzhiyun 					 &user_srf->backup_base);
1552*4882a593Smuzhiyun 		if (ret == 0) {
1553*4882a593Smuzhiyun 			if (res->backup->base.num_pages * PAGE_SIZE <
1554*4882a593Smuzhiyun 			    res->backup_size) {
1555*4882a593Smuzhiyun 				VMW_DEBUG_USER("Surface backup buffer too small.\n");
1556*4882a593Smuzhiyun 				vmw_bo_unreference(&res->backup);
1557*4882a593Smuzhiyun 				ret = -EINVAL;
1558*4882a593Smuzhiyun 				goto out_unlock;
1559*4882a593Smuzhiyun 			} else {
1560*4882a593Smuzhiyun 				backup_handle = req->base.buffer_handle;
1561*4882a593Smuzhiyun 			}
1562*4882a593Smuzhiyun 		}
1563*4882a593Smuzhiyun 	} else if (req->base.drm_surface_flags &
1564*4882a593Smuzhiyun 		   (drm_vmw_surface_flag_create_buffer |
1565*4882a593Smuzhiyun 		    drm_vmw_surface_flag_coherent))
1566*4882a593Smuzhiyun 		ret = vmw_user_bo_alloc(dev_priv, tfile,
1567*4882a593Smuzhiyun 					res->backup_size,
1568*4882a593Smuzhiyun 					req->base.drm_surface_flags &
1569*4882a593Smuzhiyun 					drm_vmw_surface_flag_shareable,
1570*4882a593Smuzhiyun 					&backup_handle,
1571*4882a593Smuzhiyun 					&res->backup,
1572*4882a593Smuzhiyun 					&user_srf->backup_base);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	if (unlikely(ret != 0)) {
1575*4882a593Smuzhiyun 		vmw_resource_unreference(&res);
1576*4882a593Smuzhiyun 		goto out_unlock;
1577*4882a593Smuzhiyun 	}
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
1580*4882a593Smuzhiyun 		struct vmw_buffer_object *backup = res->backup;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 		ttm_bo_reserve(&backup->base, false, false, NULL);
1583*4882a593Smuzhiyun 		if (!res->func->dirty_alloc)
1584*4882a593Smuzhiyun 			ret = -EINVAL;
1585*4882a593Smuzhiyun 		if (!ret)
1586*4882a593Smuzhiyun 			ret = vmw_bo_dirty_add(backup);
1587*4882a593Smuzhiyun 		if (!ret) {
1588*4882a593Smuzhiyun 			res->coherent = true;
1589*4882a593Smuzhiyun 			ret = res->func->dirty_alloc(res);
1590*4882a593Smuzhiyun 		}
1591*4882a593Smuzhiyun 		ttm_bo_unreserve(&backup->base);
1592*4882a593Smuzhiyun 		if (ret) {
1593*4882a593Smuzhiyun 			vmw_resource_unreference(&res);
1594*4882a593Smuzhiyun 			goto out_unlock;
1595*4882a593Smuzhiyun 		}
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	}
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 	tmp = vmw_resource_reference(res);
1600*4882a593Smuzhiyun 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
1601*4882a593Smuzhiyun 				    req->base.drm_surface_flags &
1602*4882a593Smuzhiyun 				    drm_vmw_surface_flag_shareable,
1603*4882a593Smuzhiyun 				    VMW_RES_SURFACE,
1604*4882a593Smuzhiyun 				    &vmw_user_surface_base_release, NULL);
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	if (unlikely(ret != 0)) {
1607*4882a593Smuzhiyun 		vmw_resource_unreference(&tmp);
1608*4882a593Smuzhiyun 		vmw_resource_unreference(&res);
1609*4882a593Smuzhiyun 		goto out_unlock;
1610*4882a593Smuzhiyun 	}
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	rep->handle      = user_srf->prime.base.handle;
1613*4882a593Smuzhiyun 	rep->backup_size = res->backup_size;
1614*4882a593Smuzhiyun 	if (res->backup) {
1615*4882a593Smuzhiyun 		rep->buffer_map_handle =
1616*4882a593Smuzhiyun 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
1617*4882a593Smuzhiyun 		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
1618*4882a593Smuzhiyun 		rep->buffer_handle = backup_handle;
1619*4882a593Smuzhiyun 	} else {
1620*4882a593Smuzhiyun 		rep->buffer_map_handle = 0;
1621*4882a593Smuzhiyun 		rep->buffer_size = 0;
1622*4882a593Smuzhiyun 		rep->buffer_handle = SVGA3D_INVALID_ID;
1623*4882a593Smuzhiyun 	}
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	vmw_resource_unreference(&res);
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun out_unlock:
1628*4882a593Smuzhiyun 	ttm_read_unlock(&dev_priv->reservation_sem);
1629*4882a593Smuzhiyun 	return ret;
1630*4882a593Smuzhiyun }
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun /**
1633*4882a593Smuzhiyun  * vmw_gb_surface_reference_internal - Internal worker implementing
1634*4882a593Smuzhiyun  * the user surface reference functionality.
1635*4882a593Smuzhiyun  *
1636*4882a593Smuzhiyun  * @dev: Pointer to a struct drm_device.
1637*4882a593Smuzhiyun  * @req: Pointer to user-space request surface arg.
1638*4882a593Smuzhiyun  * @rep: Pointer to response to user-space.
1639*4882a593Smuzhiyun  * @file_priv: Pointer to a drm file private structure.
1640*4882a593Smuzhiyun  */
1641*4882a593Smuzhiyun static int
1642*4882a593Smuzhiyun vmw_gb_surface_reference_internal(struct drm_device *dev,
1643*4882a593Smuzhiyun 				  struct drm_vmw_surface_arg *req,
1644*4882a593Smuzhiyun 				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
1645*4882a593Smuzhiyun 				  struct drm_file *file_priv)
1646*4882a593Smuzhiyun {
1647*4882a593Smuzhiyun 	struct vmw_private *dev_priv = vmw_priv(dev);
1648*4882a593Smuzhiyun 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1649*4882a593Smuzhiyun 	struct vmw_surface *srf;
1650*4882a593Smuzhiyun 	struct vmw_user_surface *user_srf;
1651*4882a593Smuzhiyun 	struct vmw_surface_metadata *metadata;
1652*4882a593Smuzhiyun 	struct ttm_base_object *base;
1653*4882a593Smuzhiyun 	uint32_t backup_handle;
1654*4882a593Smuzhiyun 	int ret;
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
1657*4882a593Smuzhiyun 					   req->handle_type, &base);
1658*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1659*4882a593Smuzhiyun 		return ret;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	user_srf = container_of(base, struct vmw_user_surface, prime.base);
1662*4882a593Smuzhiyun 	srf = &user_srf->srf;
1663*4882a593Smuzhiyun 	if (!srf->res.backup) {
1664*4882a593Smuzhiyun 		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
1665*4882a593Smuzhiyun 		goto out_bad_resource;
1666*4882a593Smuzhiyun 	}
1667*4882a593Smuzhiyun 	metadata = &srf->metadata;
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
1670*4882a593Smuzhiyun 	ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
1671*4882a593Smuzhiyun 	mutex_unlock(&dev_priv->cmdbuf_mutex);
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	if (unlikely(ret != 0)) {
1674*4882a593Smuzhiyun 		DRM_ERROR("Could not add a reference to a GB surface "
1675*4882a593Smuzhiyun 			  "backup buffer.\n");
1676*4882a593Smuzhiyun 		(void) ttm_ref_object_base_unref(tfile, base->handle,
1677*4882a593Smuzhiyun 						 TTM_REF_USAGE);
1678*4882a593Smuzhiyun 		goto out_bad_resource;
1679*4882a593Smuzhiyun 	}
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
1682*4882a593Smuzhiyun 	rep->creq.base.format = metadata->format;
1683*4882a593Smuzhiyun 	rep->creq.base.mip_levels = metadata->mip_levels[0];
1684*4882a593Smuzhiyun 	rep->creq.base.drm_surface_flags = 0;
1685*4882a593Smuzhiyun 	rep->creq.base.multisample_count = metadata->multisample_count;
1686*4882a593Smuzhiyun 	rep->creq.base.autogen_filter = metadata->autogen_filter;
1687*4882a593Smuzhiyun 	rep->creq.base.array_size = metadata->array_size;
1688*4882a593Smuzhiyun 	rep->creq.base.buffer_handle = backup_handle;
1689*4882a593Smuzhiyun 	rep->creq.base.base_size = metadata->base_size;
1690*4882a593Smuzhiyun 	rep->crep.handle = user_srf->prime.base.handle;
1691*4882a593Smuzhiyun 	rep->crep.backup_size = srf->res.backup_size;
1692*4882a593Smuzhiyun 	rep->crep.buffer_handle = backup_handle;
1693*4882a593Smuzhiyun 	rep->crep.buffer_map_handle =
1694*4882a593Smuzhiyun 		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
1695*4882a593Smuzhiyun 	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	rep->creq.version = drm_vmw_gb_surface_v1;
1698*4882a593Smuzhiyun 	rep->creq.svga3d_flags_upper_32_bits =
1699*4882a593Smuzhiyun 		SVGA3D_FLAGS_UPPER_32(metadata->flags);
1700*4882a593Smuzhiyun 	rep->creq.multisample_pattern = metadata->multisample_pattern;
1701*4882a593Smuzhiyun 	rep->creq.quality_level = metadata->quality_level;
1702*4882a593Smuzhiyun 	rep->creq.must_be_zero = 0;
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun out_bad_resource:
1705*4882a593Smuzhiyun 	ttm_base_object_unref(&base);
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	return ret;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun /**
1711*4882a593Smuzhiyun  * vmw_subres_dirty_add - Add a dirty region to a subresource
1712*4882a593Smuzhiyun  * @dirty: The surface's dirty tracker.
1713*4882a593Smuzhiyun  * @loc_start: The location corresponding to the start of the region.
1714*4882a593Smuzhiyun  * @loc_end: The location corresponding to the end of the region.
1715*4882a593Smuzhiyun  *
1716*4882a593Smuzhiyun  * As we are assuming that @loc_start and @loc_end represent a sequential
1717*4882a593Smuzhiyun  * range of backing store memory, if the region spans multiple lines then
1718*4882a593Smuzhiyun  * regardless of the x coordinate, the full lines are dirtied.
1719*4882a593Smuzhiyun  * Correspondingly if the region spans multiple z slices, then full rather
1720*4882a593Smuzhiyun  * than partial z slices are dirtied.
1721*4882a593Smuzhiyun  */
1722*4882a593Smuzhiyun static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
1723*4882a593Smuzhiyun 				 const struct svga3dsurface_loc *loc_start,
1724*4882a593Smuzhiyun 				 const struct svga3dsurface_loc *loc_end)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun 	const struct svga3dsurface_cache *cache = &dirty->cache;
1727*4882a593Smuzhiyun 	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
1728*4882a593Smuzhiyun 	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
1729*4882a593Smuzhiyun 	const struct drm_vmw_size *size = &cache->mip[mip].size;
1730*4882a593Smuzhiyun 	u32 box_c2 = box->z + box->d;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
1733*4882a593Smuzhiyun 		return;
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 	if (box->d == 0 || box->z > loc_start->z)
1736*4882a593Smuzhiyun 		box->z = loc_start->z;
1737*4882a593Smuzhiyun 	if (box_c2 < loc_end->z)
1738*4882a593Smuzhiyun 		box->d = loc_end->z - box->z;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	if (loc_start->z + 1 == loc_end->z) {
1741*4882a593Smuzhiyun 		box_c2 = box->y + box->h;
1742*4882a593Smuzhiyun 		if (box->h == 0 || box->y > loc_start->y)
1743*4882a593Smuzhiyun 			box->y = loc_start->y;
1744*4882a593Smuzhiyun 		if (box_c2 < loc_end->y)
1745*4882a593Smuzhiyun 			box->h = loc_end->y - box->y;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 		if (loc_start->y + 1 == loc_end->y) {
1748*4882a593Smuzhiyun 			box_c2 = box->x + box->w;
1749*4882a593Smuzhiyun 			if (box->w == 0 || box->x > loc_start->x)
1750*4882a593Smuzhiyun 				box->x = loc_start->x;
1751*4882a593Smuzhiyun 			if (box_c2 < loc_end->x)
1752*4882a593Smuzhiyun 				box->w = loc_end->x - box->x;
1753*4882a593Smuzhiyun 		} else {
1754*4882a593Smuzhiyun 			box->x = 0;
1755*4882a593Smuzhiyun 			box->w = size->width;
1756*4882a593Smuzhiyun 		}
1757*4882a593Smuzhiyun 	} else {
1758*4882a593Smuzhiyun 		box->y = 0;
1759*4882a593Smuzhiyun 		box->h = size->height;
1760*4882a593Smuzhiyun 		box->x = 0;
1761*4882a593Smuzhiyun 		box->w = size->width;
1762*4882a593Smuzhiyun 	}
1763*4882a593Smuzhiyun }
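/*
 * Illustrative example of the clipping rule above, assuming a
 * non-compressed 2D mip level: a dirty range whose first byte lies at
 * (x=100, y=3) and whose last byte lies at (x=20, y=6) of the same slice
 * spans several lines, so the box grows to cover lines 3..6 at full
 * width; only when the first and last byte share a single line is the
 * box narrowed in x as well.
 */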
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun /**
1766*4882a593Smuzhiyun  * vmw_subres_dirty_full - Mark a full subresource as dirty
1767*4882a593Smuzhiyun  * @dirty: The surface's dirty tracker.
1768*4882a593Smuzhiyun  * @subres: The subresource
1769*4882a593Smuzhiyun  */
1770*4882a593Smuzhiyun static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
1771*4882a593Smuzhiyun {
1772*4882a593Smuzhiyun 	const struct svga3dsurface_cache *cache = &dirty->cache;
1773*4882a593Smuzhiyun 	u32 mip = subres % cache->num_mip_levels;
1774*4882a593Smuzhiyun 	const struct drm_vmw_size *size = &cache->mip[mip].size;
1775*4882a593Smuzhiyun 	SVGA3dBox *box = &dirty->boxes[subres];
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	box->x = 0;
1778*4882a593Smuzhiyun 	box->y = 0;
1779*4882a593Smuzhiyun 	box->z = 0;
1780*4882a593Smuzhiyun 	box->w = size->width;
1781*4882a593Smuzhiyun 	box->h = size->height;
1782*4882a593Smuzhiyun 	box->d = size->depth;
1783*4882a593Smuzhiyun }
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun /*
1786*4882a593Smuzhiyun  * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
1787*4882a593Smuzhiyun  * surfaces.
1788*4882a593Smuzhiyun  */
1789*4882a593Smuzhiyun static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
1790*4882a593Smuzhiyun 					    size_t start, size_t end)
1791*4882a593Smuzhiyun {
1792*4882a593Smuzhiyun 	struct vmw_surface_dirty *dirty =
1793*4882a593Smuzhiyun 		(struct vmw_surface_dirty *) res->dirty;
1794*4882a593Smuzhiyun 	size_t backup_end = res->backup_offset + res->backup_size;
1795*4882a593Smuzhiyun 	struct svga3dsurface_loc loc1, loc2;
1796*4882a593Smuzhiyun 	const struct svga3dsurface_cache *cache;
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
1799*4882a593Smuzhiyun 	end = min(end, backup_end) - res->backup_offset;
1800*4882a593Smuzhiyun 	cache = &dirty->cache;
1801*4882a593Smuzhiyun 	svga3dsurface_get_loc(cache, &loc1, start);
1802*4882a593Smuzhiyun 	svga3dsurface_get_loc(cache, &loc2, end - 1);
1803*4882a593Smuzhiyun 	svga3dsurface_inc_loc(cache, &loc2);
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun 	if (loc1.sheet != loc2.sheet) {
1806*4882a593Smuzhiyun 		u32 sub_res;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 		/*
1809*4882a593Smuzhiyun 		 * Multiple multisample sheets. An optimized implementation
1810*4882a593Smuzhiyun 		 * would compute the dirty region for each sheet and take the
1811*4882a593Smuzhiyun 		 * union, but since this is not a common case, just dirty
1812*4882a593Smuzhiyun 		 * the whole surface.
1813*4882a593Smuzhiyun 		 */
1814*4882a593Smuzhiyun 		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
1815*4882a593Smuzhiyun 			vmw_subres_dirty_full(dirty, sub_res);
1816*4882a593Smuzhiyun 		return;
1817*4882a593Smuzhiyun 	}
1818*4882a593Smuzhiyun 	if (loc1.sub_resource + 1 == loc2.sub_resource) {
1819*4882a593Smuzhiyun 		/* Dirty range covers a single sub-resource */
1820*4882a593Smuzhiyun 		vmw_subres_dirty_add(dirty, &loc1, &loc2);
1821*4882a593Smuzhiyun 	} else {
1822*4882a593Smuzhiyun 		/* Dirty range covers multiple sub-resources */
1823*4882a593Smuzhiyun 		struct svga3dsurface_loc loc_min, loc_max;
1824*4882a593Smuzhiyun 		u32 sub_res;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 		svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max);
1827*4882a593Smuzhiyun 		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
1828*4882a593Smuzhiyun 		svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
1829*4882a593Smuzhiyun 		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
1830*4882a593Smuzhiyun 		for (sub_res = loc1.sub_resource + 1;
1831*4882a593Smuzhiyun 		     sub_res < loc2.sub_resource - 1; ++sub_res)
1832*4882a593Smuzhiyun 			vmw_subres_dirty_full(dirty, sub_res);
1833*4882a593Smuzhiyun 	}
1834*4882a593Smuzhiyun }
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun /*
1837*4882a593Smuzhiyun  * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
1838*4882a593Smuzhiyun  * surfaces.
1839*4882a593Smuzhiyun  */
1840*4882a593Smuzhiyun static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
1841*4882a593Smuzhiyun 					    size_t start, size_t end)
1842*4882a593Smuzhiyun {
1843*4882a593Smuzhiyun 	struct vmw_surface_dirty *dirty =
1844*4882a593Smuzhiyun 		(struct vmw_surface_dirty *) res->dirty;
1845*4882a593Smuzhiyun 	const struct svga3dsurface_cache *cache = &dirty->cache;
1846*4882a593Smuzhiyun 	size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
1847*4882a593Smuzhiyun 	SVGA3dBox *box = &dirty->boxes[0];
1848*4882a593Smuzhiyun 	u32 box_c2;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	box->h = box->d = 1;
1851*4882a593Smuzhiyun 	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
1852*4882a593Smuzhiyun 	end = min(end, backup_end) - res->backup_offset;
1853*4882a593Smuzhiyun 	box_c2 = box->x + box->w;
1854*4882a593Smuzhiyun 	if (box->w == 0 || box->x > start)
1855*4882a593Smuzhiyun 		box->x = start;
1856*4882a593Smuzhiyun 	if (box_c2 < end)
1857*4882a593Smuzhiyun 		box->w = end - box->x;
1858*4882a593Smuzhiyun }
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun /*
1861*4882a593Smuzhiyun  * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
1862*4882a593Smuzhiyun  */
1863*4882a593Smuzhiyun static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
1864*4882a593Smuzhiyun 					size_t end)
1865*4882a593Smuzhiyun {
1866*4882a593Smuzhiyun 	struct vmw_surface *srf = vmw_res_to_srf(res);
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	if (WARN_ON(end <= res->backup_offset ||
1869*4882a593Smuzhiyun 		    start >= res->backup_offset + res->backup_size))
1870*4882a593Smuzhiyun 		return;
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	if (srf->metadata.format == SVGA3D_BUFFER)
1873*4882a593Smuzhiyun 		vmw_surface_buf_dirty_range_add(res, start, end);
1874*4882a593Smuzhiyun 	else
1875*4882a593Smuzhiyun 		vmw_surface_tex_dirty_range_add(res, start, end);
1876*4882a593Smuzhiyun }
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun /*
1879*4882a593Smuzhiyun  * vmw_surface_dirty_sync - The surface's dirty_sync callback.
1880*4882a593Smuzhiyun  */
1881*4882a593Smuzhiyun static int vmw_surface_dirty_sync(struct vmw_resource *res)
1882*4882a593Smuzhiyun {
1883*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
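	/*
	 * has_dx is hard-coded to false here, so only the UPDATE_GB_IMAGE
	 * path below is currently taken.
	 */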
1884*4882a593Smuzhiyun 	bool has_dx = false;
1885*4882a593Smuzhiyun 	u32 i, num_dirty;
1886*4882a593Smuzhiyun 	struct vmw_surface_dirty *dirty =
1887*4882a593Smuzhiyun 		(struct vmw_surface_dirty *) res->dirty;
1888*4882a593Smuzhiyun 	size_t alloc_size;
1889*4882a593Smuzhiyun 	const struct svga3dsurface_cache *cache = &dirty->cache;
1890*4882a593Smuzhiyun 	struct {
1891*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1892*4882a593Smuzhiyun 		SVGA3dCmdDXUpdateSubResource body;
1893*4882a593Smuzhiyun 	} *cmd1;
1894*4882a593Smuzhiyun 	struct {
1895*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
1896*4882a593Smuzhiyun 		SVGA3dCmdUpdateGBImage body;
1897*4882a593Smuzhiyun 	} *cmd2;
1898*4882a593Smuzhiyun 	void *cmd;
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	num_dirty = 0;
1901*4882a593Smuzhiyun 	for (i = 0; i < dirty->num_subres; ++i) {
1902*4882a593Smuzhiyun 		const SVGA3dBox *box = &dirty->boxes[i];
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 		if (box->d)
1905*4882a593Smuzhiyun 			num_dirty++;
1906*4882a593Smuzhiyun 	}
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	if (!num_dirty)
1909*4882a593Smuzhiyun 		goto out;
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
1912*4882a593Smuzhiyun 	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
1913*4882a593Smuzhiyun 	if (!cmd)
1914*4882a593Smuzhiyun 		return -ENOMEM;
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	cmd1 = cmd;
1917*4882a593Smuzhiyun 	cmd2 = cmd;
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun 	for (i = 0; i < dirty->num_subres; ++i) {
1920*4882a593Smuzhiyun 		const SVGA3dBox *box = &dirty->boxes[i];
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 		if (!box->d)
1923*4882a593Smuzhiyun 			continue;
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 		/*
1926*4882a593Smuzhiyun 		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
1927*4882a593Smuzhiyun 		 * UPDATE_GB_IMAGE is not.
1928*4882a593Smuzhiyun 		 */
1929*4882a593Smuzhiyun 		if (has_dx) {
1930*4882a593Smuzhiyun 			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
1931*4882a593Smuzhiyun 			cmd1->header.size = sizeof(cmd1->body);
1932*4882a593Smuzhiyun 			cmd1->body.sid = res->id;
1933*4882a593Smuzhiyun 			cmd1->body.subResource = i;
1934*4882a593Smuzhiyun 			cmd1->body.box = *box;
1935*4882a593Smuzhiyun 			cmd1++;
1936*4882a593Smuzhiyun 		} else {
1937*4882a593Smuzhiyun 			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
1938*4882a593Smuzhiyun 			cmd2->header.size = sizeof(cmd2->body);
1939*4882a593Smuzhiyun 			cmd2->body.image.sid = res->id;
1940*4882a593Smuzhiyun 			cmd2->body.image.face = i / cache->num_mip_levels;
1941*4882a593Smuzhiyun 			cmd2->body.image.mipmap = i -
1942*4882a593Smuzhiyun 				(cache->num_mip_levels * cmd2->body.image.face);
1943*4882a593Smuzhiyun 			cmd2->body.box = *box;
1944*4882a593Smuzhiyun 			cmd2++;
1945*4882a593Smuzhiyun 		}
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	}
1948*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, alloc_size);
1949*4882a593Smuzhiyun  out:
1950*4882a593Smuzhiyun 	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
1951*4882a593Smuzhiyun 	       dirty->num_subres);
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	return 0;
1954*4882a593Smuzhiyun }
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun /*
1957*4882a593Smuzhiyun  * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
1958*4882a593Smuzhiyun  */
1959*4882a593Smuzhiyun static int vmw_surface_dirty_alloc(struct vmw_resource *res)
1960*4882a593Smuzhiyun {
1961*4882a593Smuzhiyun 	struct vmw_surface *srf = vmw_res_to_srf(res);
1962*4882a593Smuzhiyun 	const struct vmw_surface_metadata *metadata = &srf->metadata;
1963*4882a593Smuzhiyun 	struct vmw_surface_dirty *dirty;
1964*4882a593Smuzhiyun 	u32 num_layers = 1;
1965*4882a593Smuzhiyun 	u32 num_mip;
1966*4882a593Smuzhiyun 	u32 num_subres;
1967*4882a593Smuzhiyun 	u32 num_samples;
1968*4882a593Smuzhiyun 	size_t dirty_size, acc_size;
1969*4882a593Smuzhiyun 	static struct ttm_operation_ctx ctx = {
1970*4882a593Smuzhiyun 		.interruptible = false,
1971*4882a593Smuzhiyun 		.no_wait_gpu = false
1972*4882a593Smuzhiyun 	};
1973*4882a593Smuzhiyun 	int ret;
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	if (metadata->array_size)
1976*4882a593Smuzhiyun 		num_layers = metadata->array_size;
1977*4882a593Smuzhiyun 	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
1978*4882a593Smuzhiyun 		num_layers *= SVGA3D_MAX_SURFACE_FACES;
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	num_mip = metadata->mip_levels[0];
1981*4882a593Smuzhiyun 	if (!num_mip)
1982*4882a593Smuzhiyun 		num_mip = 1;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	num_subres = num_layers * num_mip;
1985*4882a593Smuzhiyun 	dirty_size = struct_size(dirty, boxes, num_subres);
1986*4882a593Smuzhiyun 	acc_size = ttm_round_pot(dirty_size);
1987*4882a593Smuzhiyun 	ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
1988*4882a593Smuzhiyun 				   acc_size, &ctx);
1989*4882a593Smuzhiyun 	if (ret) {
1990*4882a593Smuzhiyun 		VMW_DEBUG_USER("Out of graphics memory for surface "
1991*4882a593Smuzhiyun 			       "dirty tracker.\n");
1992*4882a593Smuzhiyun 		return ret;
1993*4882a593Smuzhiyun 	}
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	dirty = kvzalloc(dirty_size, GFP_KERNEL);
1996*4882a593Smuzhiyun 	if (!dirty) {
1997*4882a593Smuzhiyun 		ret = -ENOMEM;
1998*4882a593Smuzhiyun 		goto out_no_dirty;
1999*4882a593Smuzhiyun 	}
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	num_samples = max_t(u32, 1, metadata->multisample_count);
2002*4882a593Smuzhiyun 	ret = svga3dsurface_setup_cache(&metadata->base_size, metadata->format,
2003*4882a593Smuzhiyun 					num_mip, num_layers, num_samples,
2004*4882a593Smuzhiyun 					&dirty->cache);
2005*4882a593Smuzhiyun 	if (ret)
2006*4882a593Smuzhiyun 		goto out_no_cache;
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	dirty->num_subres = num_subres;
2009*4882a593Smuzhiyun 	dirty->size = acc_size;
2010*4882a593Smuzhiyun 	res->dirty = (struct vmw_resource_dirty *) dirty;
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun 	return 0;
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun out_no_cache:
2015*4882a593Smuzhiyun 	kvfree(dirty);
2016*4882a593Smuzhiyun out_no_dirty:
2017*4882a593Smuzhiyun 	ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
2018*4882a593Smuzhiyun 	return ret;
2019*4882a593Smuzhiyun }
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun /*
2022*4882a593Smuzhiyun  * vmw_surface_dirty_free - The surface's dirty_free callback
2023*4882a593Smuzhiyun  */
2024*4882a593Smuzhiyun static void vmw_surface_dirty_free(struct vmw_resource *res)
2025*4882a593Smuzhiyun {
2026*4882a593Smuzhiyun 	struct vmw_surface_dirty *dirty =
2027*4882a593Smuzhiyun 		(struct vmw_surface_dirty *) res->dirty;
2028*4882a593Smuzhiyun 	size_t acc_size = dirty->size;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 	kvfree(dirty);
2031*4882a593Smuzhiyun 	ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
2032*4882a593Smuzhiyun 	res->dirty = NULL;
2033*4882a593Smuzhiyun }
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun /*
2036*4882a593Smuzhiyun  * vmw_surface_clean - The surface's clean callback
2037*4882a593Smuzhiyun  */
2038*4882a593Smuzhiyun static int vmw_surface_clean(struct vmw_resource *res)
2039*4882a593Smuzhiyun {
2040*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
2041*4882a593Smuzhiyun 	size_t alloc_size;
2042*4882a593Smuzhiyun 	struct {
2043*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2044*4882a593Smuzhiyun 		SVGA3dCmdReadbackGBSurface body;
2045*4882a593Smuzhiyun 	} *cmd;
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	alloc_size = sizeof(*cmd);
2048*4882a593Smuzhiyun 	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
2049*4882a593Smuzhiyun 	if (!cmd)
2050*4882a593Smuzhiyun 		return -ENOMEM;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
2053*4882a593Smuzhiyun 	cmd->header.size = sizeof(cmd->body);
2054*4882a593Smuzhiyun 	cmd->body.sid = res->id;
2055*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, alloc_size);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	return 0;
2058*4882a593Smuzhiyun }
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun /*
2061*4882a593Smuzhiyun  * vmw_gb_surface_define - Define a private GB surface
2062*4882a593Smuzhiyun  *
2063*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private.
2064*4882a593Smuzhiyun  * @user_accounting_size: Used to track user-space memory usage; set
2065*4882a593Smuzhiyun  *                        to 0 for kernel-mode-only memory.
2066*4882a593Smuzhiyun  * @req: Metadata representing the surface to create.
2067*4882a593Smuzhiyun  * @srf_out: Pointer to the allocated surface. Set to NULL on failure.
2068*4882a593Smuzhiyun  *
2069*4882a593Smuzhiyun  * GB surfaces allocated by this function will not have a user mode handle, and
2070*4882a593Smuzhiyun  * thus will only be visible to vmwgfx.  For optimization reasons the
2071*4882a593Smuzhiyun  * surface may later be given a user mode handle by another function to make
2072*4882a593Smuzhiyun  * it available to user mode drivers.
2073*4882a593Smuzhiyun  */
2074*4882a593Smuzhiyun int vmw_gb_surface_define(struct vmw_private *dev_priv,
2075*4882a593Smuzhiyun 			  uint32_t user_accounting_size,
2076*4882a593Smuzhiyun 			  const struct vmw_surface_metadata *req,
2077*4882a593Smuzhiyun 			  struct vmw_surface **srf_out)
2078*4882a593Smuzhiyun {
2079*4882a593Smuzhiyun 	struct vmw_surface_metadata *metadata;
2080*4882a593Smuzhiyun 	struct vmw_user_surface *user_srf;
2081*4882a593Smuzhiyun 	struct vmw_surface *srf;
2082*4882a593Smuzhiyun 	struct ttm_operation_ctx ctx = {
2083*4882a593Smuzhiyun 		.interruptible = true,
2084*4882a593Smuzhiyun 		.no_wait_gpu = false
2085*4882a593Smuzhiyun 	};
2086*4882a593Smuzhiyun 	u32 sample_count = 1;
2087*4882a593Smuzhiyun 	u32 num_layers = 1;
2088*4882a593Smuzhiyun 	int ret;
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	*srf_out = NULL;
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	if (req->scanout) {
2093*4882a593Smuzhiyun 		if (!svga3dsurface_is_screen_target_format(req->format)) {
2094*4882a593Smuzhiyun 			VMW_DEBUG_USER("Invalid Screen Target surface format.");
2095*4882a593Smuzhiyun 			return -EINVAL;
2096*4882a593Smuzhiyun 		}
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 		if (req->base_size.width > dev_priv->texture_max_width ||
2099*4882a593Smuzhiyun 		    req->base_size.height > dev_priv->texture_max_height) {
2100*4882a593Smuzhiyun 			VMW_DEBUG_USER("%ux%u, exceeds max surface size %ux%u\n",
2101*4882a593Smuzhiyun 				       req->base_size.width,
2102*4882a593Smuzhiyun 				       req->base_size.height,
2103*4882a593Smuzhiyun 				       dev_priv->texture_max_width,
2104*4882a593Smuzhiyun 				       dev_priv->texture_max_height);
2105*4882a593Smuzhiyun 			return -EINVAL;
2106*4882a593Smuzhiyun 		}
2107*4882a593Smuzhiyun 	} else {
2108*4882a593Smuzhiyun 		const struct svga3d_surface_desc *desc =
2109*4882a593Smuzhiyun 			svga3dsurface_get_desc(req->format);
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 		if (desc->block_desc == SVGA3DBLOCKDESC_NONE) {
2112*4882a593Smuzhiyun 			VMW_DEBUG_USER("Invalid surface format.\n");
2113*4882a593Smuzhiyun 			return -EINVAL;
2114*4882a593Smuzhiyun 		}
2115*4882a593Smuzhiyun 	}
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
2118*4882a593Smuzhiyun 		return -EINVAL;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	if (req->num_sizes != 1)
2121*4882a593Smuzhiyun 		return -EINVAL;
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	if (req->sizes != NULL)
2124*4882a593Smuzhiyun 		return -EINVAL;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2127*4882a593Smuzhiyun 	if (unlikely(ret != 0))
2128*4882a593Smuzhiyun 		return ret;
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
2131*4882a593Smuzhiyun 				   user_accounting_size, &ctx);
2132*4882a593Smuzhiyun 	if (ret != 0) {
2133*4882a593Smuzhiyun 		if (ret != -ERESTARTSYS)
2134*4882a593Smuzhiyun 			DRM_ERROR("Out of graphics memory for surface.\n");
2135*4882a593Smuzhiyun 		goto out_unlock;
2136*4882a593Smuzhiyun 	}
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
2139*4882a593Smuzhiyun 	if (unlikely(!user_srf)) {
2140*4882a593Smuzhiyun 		ret = -ENOMEM;
2141*4882a593Smuzhiyun 		goto out_no_user_srf;
2142*4882a593Smuzhiyun 	}
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	*srf_out  = &user_srf->srf;
2145*4882a593Smuzhiyun 	user_srf->size = user_accounting_size;
2146*4882a593Smuzhiyun 	user_srf->prime.base.shareable = false;
2147*4882a593Smuzhiyun 	user_srf->prime.base.tfile = NULL;
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	srf = &user_srf->srf;
2150*4882a593Smuzhiyun 	srf->metadata = *req;
2151*4882a593Smuzhiyun 	srf->offsets = NULL;
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	metadata = &srf->metadata;
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	if (metadata->array_size)
2156*4882a593Smuzhiyun 		num_layers = req->array_size;
2157*4882a593Smuzhiyun 	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
2158*4882a593Smuzhiyun 		num_layers = SVGA3D_MAX_SURFACE_FACES;
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
2161*4882a593Smuzhiyun 		sample_count = metadata->multisample_count;
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	srf->res.backup_size =
2164*4882a593Smuzhiyun 		svga3dsurface_get_serialized_size_extended(metadata->format,
2165*4882a593Smuzhiyun 							   metadata->base_size,
2166*4882a593Smuzhiyun 							   metadata->mip_levels[0],
2167*4882a593Smuzhiyun 							   num_layers,
2168*4882a593Smuzhiyun 							   sample_count);
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
2171*4882a593Smuzhiyun 		srf->res.backup_size += sizeof(SVGA3dDXSOState);
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 	/*
2174*4882a593Smuzhiyun 	 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
2175*4882a593Smuzhiyun 	 * size greater than STDU max width/height. This is really a workaround
2176*4882a593Smuzhiyun 	 * to support creation of big framebuffer requested by some user-space
2177*4882a593Smuzhiyun 	 * for whole topology. That big framebuffer won't really be used for
2178*4882a593Smuzhiyun 	 * binding with screen target as during prepare_fb a separate surface is
2179*4882a593Smuzhiyun 	 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
2180*4882a593Smuzhiyun 	 */
2181*4882a593Smuzhiyun 	if (dev_priv->active_display_unit == vmw_du_screen_target &&
2182*4882a593Smuzhiyun 	    metadata->scanout &&
2183*4882a593Smuzhiyun 	    metadata->base_size.width <= dev_priv->stdu_max_width &&
2184*4882a593Smuzhiyun 	    metadata->base_size.height <= dev_priv->stdu_max_height)
2185*4882a593Smuzhiyun 		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	/*
2188*4882a593Smuzhiyun 	 * From this point, the generic resource management functions
2189*4882a593Smuzhiyun 	 * destroy the object on failure.
2190*4882a593Smuzhiyun 	 */
2191*4882a593Smuzhiyun 	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 	ttm_read_unlock(&dev_priv->reservation_sem);
2194*4882a593Smuzhiyun 	return ret;
2195*4882a593Smuzhiyun 
2196*4882a593Smuzhiyun out_no_user_srf:
2197*4882a593Smuzhiyun 	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun out_unlock:
2200*4882a593Smuzhiyun 	ttm_read_unlock(&dev_priv->reservation_sem);
2201*4882a593Smuzhiyun 	return ret;
2202*4882a593Smuzhiyun }
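/*
 * A minimal in-kernel usage sketch of vmw_gb_surface_define(); the field
 * values below are illustrative assumptions only:
 *
 *	struct vmw_surface_metadata metadata = {0};
 *	struct vmw_surface *srf;
 *	int ret;
 *
 *	metadata.format = SVGA3D_X8R8G8B8;
 *	metadata.mip_levels[0] = 1;
 *	metadata.num_sizes = 1;
 *	metadata.base_size.width = 64;
 *	metadata.base_size.height = 64;
 *	metadata.base_size.depth = 1;
 *
 *	ret = vmw_gb_surface_define(dev_priv, 0, &metadata, &srf);
 */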
2203