// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})
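
/*
 * Minimal usage sketch, mirroring how the DX command verifiers further down
 * (e.g. vmw_view_bindings_add()) consume this macro; a NULL result means the
 * command is invalid:
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */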

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var

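/*
 * Usage sketch, as in the command checkers below (e.g.
 * vmw_rebind_all_dx_query() and vmw_cmd_cid_check()): declare a pointer to a
 * header + typed-body pair and recover it from a submitted command header:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */
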
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};
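
/*
 * Note: the 29-bit @offset and 3-bit @rel_type widths above are
 * sanity-checked against SVGA_CB_MAX_SIZE and vmw_res_rel_max by the
 * BUILD_BUG_ON()s in vmw_resource_relocations_apply() below.
 */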

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
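
/*
 * Hypothetical sketch of a command-table entry built with VMW_CMD_DEF; the
 * actual vmw_cmd_entries[] table is defined later in the driver and is not
 * part of this excerpt:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check,
 *			    true, false, false),
 *	};
 */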

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
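
/*
 * For example, vmw_cmd_res_check() below uses this helper to compute the
 * byte offset of a resource id inside the command buffer:
 *
 *	vmw_ptr_diff(sw_context->buf_start, id_loc)
 */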

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager's
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}
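
/*
 * Note: a view id of SVGA3D_INVALID_ID above leaves @view NULL, so
 * vmw_binding_add() records an empty binding for that slot, i.e. the slot
 * ends up unbound.
 */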

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun /**
1021*4882a593Smuzhiyun  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1022*4882a593Smuzhiyun  *
1023*4882a593Smuzhiyun  * @dev_priv: The device private structure.
1024*4882a593Smuzhiyun  * @new_query_bo: The new buffer holding query results.
1025*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1026*4882a593Smuzhiyun  *
1027*4882a593Smuzhiyun  * This function checks whether @new_query_bo is suitable for holding query
1028*4882a593Smuzhiyun  * results, and if another buffer currently is pinned for query results. If so,
1029*4882a593Smuzhiyun  * the function prepares the state of @sw_context for switching pinned buffers
1030*4882a593Smuzhiyun  * after successful submission of the current command batch.
1031*4882a593Smuzhiyun  */
vmw_query_bo_switch_prepare(struct vmw_private * dev_priv,struct vmw_buffer_object * new_query_bo,struct vmw_sw_context * sw_context)1032*4882a593Smuzhiyun static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1033*4882a593Smuzhiyun 				       struct vmw_buffer_object *new_query_bo,
1034*4882a593Smuzhiyun 				       struct vmw_sw_context *sw_context)
1035*4882a593Smuzhiyun {
1036*4882a593Smuzhiyun 	struct vmw_res_cache_entry *ctx_entry =
1037*4882a593Smuzhiyun 		&sw_context->res_cache[vmw_res_context];
1038*4882a593Smuzhiyun 	int ret;
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	BUG_ON(!ctx_entry->valid);
1041*4882a593Smuzhiyun 	sw_context->last_query_ctx = ctx_entry->res;
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1044*4882a593Smuzhiyun 
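		/*
		 * Query result buffers are expected to be small; reject
		 * anything larger than four pages.
		 */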
1045*4882a593Smuzhiyun 		if (unlikely(new_query_bo->base.num_pages > 4)) {
1046*4882a593Smuzhiyun 			VMW_DEBUG_USER("Query buffer too large.\n");
1047*4882a593Smuzhiyun 			return -EINVAL;
1048*4882a593Smuzhiyun 		}
1049*4882a593Smuzhiyun 
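		/*
		 * A different buffer was already used for queries in this
		 * batch. Validate the old buffer as well and request a query
		 * barrier after submission so it can be safely unpinned.
		 */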
1050*4882a593Smuzhiyun 		if (unlikely(sw_context->cur_query_bo != NULL)) {
1051*4882a593Smuzhiyun 			sw_context->needs_post_query_barrier = true;
1052*4882a593Smuzhiyun 			ret = vmw_validation_add_bo(sw_context->ctx,
1053*4882a593Smuzhiyun 						    sw_context->cur_query_bo,
1054*4882a593Smuzhiyun 						    dev_priv->has_mob, false);
1055*4882a593Smuzhiyun 			if (unlikely(ret != 0))
1056*4882a593Smuzhiyun 				return ret;
1057*4882a593Smuzhiyun 		}
1058*4882a593Smuzhiyun 		sw_context->cur_query_bo = new_query_bo;
1059*4882a593Smuzhiyun 
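		/*
		 * The dummy query buffer is used to emit the query barrier on
		 * a switch, so always validate it as well.
		 */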
1060*4882a593Smuzhiyun 		ret = vmw_validation_add_bo(sw_context->ctx,
1061*4882a593Smuzhiyun 					    dev_priv->dummy_query_bo,
1062*4882a593Smuzhiyun 					    dev_priv->has_mob, false);
1063*4882a593Smuzhiyun 		if (unlikely(ret != 0))
1064*4882a593Smuzhiyun 			return ret;
1065*4882a593Smuzhiyun 	}
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	return 0;
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun /**
1071*4882a593Smuzhiyun  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1072*4882a593Smuzhiyun  *
1073*4882a593Smuzhiyun  * @dev_priv: The device private structure.
1074*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission batch.
1075*4882a593Smuzhiyun  *
1076*4882a593Smuzhiyun  * This function will check if we're switching query buffers, and will then,
1077*4882a593Smuzhiyun  * issue a dummy occlusion query wait used as a query barrier. When the fence
1078*4882a593Smuzhiyun  * object following that query wait has signaled, we are sure that all preceding
1079*4882a593Smuzhiyun  * queries have finished, and the old query buffer can be unpinned. However,
1080*4882a593Smuzhiyun  * since both the new query buffer and the old one are fenced with that fence,
1081*4882a593Smuzhiyun  * we can do an asynchronous unpin now, and be sure that the old query buffer
1082*4882a593Smuzhiyun  * won't be moved until the fence has signaled.
1083*4882a593Smuzhiyun  *
1084*4882a593Smuzhiyun  * As mentioned above, both the new and the old query buffers need to be fenced
1085*4882a593Smuzhiyun  * using a sequence emitted *after* calling this function.
1086*4882a593Smuzhiyun  */
1087*4882a593Smuzhiyun static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1088*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context)
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun 	/*
1091*4882a593Smuzhiyun 	 * The validate list should still hold references to all
1092*4882a593Smuzhiyun 	 * contexts here.
1093*4882a593Smuzhiyun 	 */
1094*4882a593Smuzhiyun 	if (sw_context->needs_post_query_barrier) {
1095*4882a593Smuzhiyun 		struct vmw_res_cache_entry *ctx_entry =
1096*4882a593Smuzhiyun 			&sw_context->res_cache[vmw_res_context];
1097*4882a593Smuzhiyun 		struct vmw_resource *ctx;
1098*4882a593Smuzhiyun 		int ret;
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 		BUG_ON(!ctx_entry->valid);
1101*4882a593Smuzhiyun 		ctx = ctx_entry->res;
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 		if (unlikely(ret != 0))
1106*4882a593Smuzhiyun 			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1107*4882a593Smuzhiyun 	}
1108*4882a593Smuzhiyun 
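	/*
	 * Unpin the previously pinned query buffer, if any, and pin the new
	 * one unless a query barrier is still pending for it.
	 */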
1109*4882a593Smuzhiyun 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1110*4882a593Smuzhiyun 		if (dev_priv->pinned_bo) {
1111*4882a593Smuzhiyun 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1112*4882a593Smuzhiyun 			vmw_bo_unreference(&dev_priv->pinned_bo);
1113*4882a593Smuzhiyun 		}
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 		if (!sw_context->needs_post_query_barrier) {
1116*4882a593Smuzhiyun 			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 			/*
1119*4882a593Smuzhiyun 			 * We also pin the dummy_query_bo buffer so that we
1120*4882a593Smuzhiyun 			 * don't need to validate it when emitting dummy queries
1121*4882a593Smuzhiyun 			 * in context destroy paths.
1122*4882a593Smuzhiyun 			 */
1123*4882a593Smuzhiyun 			if (!dev_priv->dummy_query_bo_pinned) {
1124*4882a593Smuzhiyun 				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1125*4882a593Smuzhiyun 						    true);
1126*4882a593Smuzhiyun 				dev_priv->dummy_query_bo_pinned = true;
1127*4882a593Smuzhiyun 			}
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 			BUG_ON(sw_context->last_query_ctx == NULL);
1130*4882a593Smuzhiyun 			dev_priv->query_cid = sw_context->last_query_ctx->id;
1131*4882a593Smuzhiyun 			dev_priv->query_cid_valid = true;
1132*4882a593Smuzhiyun 			dev_priv->pinned_bo =
1133*4882a593Smuzhiyun 				vmw_bo_reference(sw_context->cur_query_bo);
1134*4882a593Smuzhiyun 		}
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun /**
1139*4882a593Smuzhiyun  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1140*4882a593Smuzhiyun  * to a MOB id.
1141*4882a593Smuzhiyun  *
1142*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private structure.
1143*4882a593Smuzhiyun  * @sw_context: The software context used for this command batch validation.
1144*4882a593Smuzhiyun  * @id: Pointer to the user-space handle to be translated.
1145*4882a593Smuzhiyun  * @vmw_bo_p: Points to a location that, on successful return, will carry a
1146*4882a593Smuzhiyun  * non-reference-counted pointer to the buffer object identified by the
1147*4882a593Smuzhiyun  * user-space handle in @id.
1148*4882a593Smuzhiyun  *
1149*4882a593Smuzhiyun  * This function saves information needed to translate a user-space buffer
1150*4882a593Smuzhiyun  * handle to a MOB id. The translation does not take place immediately, but
1151*4882a593Smuzhiyun  * during a call to vmw_apply_relocations().
1152*4882a593Smuzhiyun  *
1153*4882a593Smuzhiyun  * This function builds a relocation list and a list of buffers to validate. The
1154*4882a593Smuzhiyun  * former needs to be freed using either vmw_apply_relocations() or
1155*4882a593Smuzhiyun  * vmw_free_relocations(). The latter needs to be freed using
1156*4882a593Smuzhiyun  * vmw_clear_validations().
1157*4882a593Smuzhiyun  */
1158*4882a593Smuzhiyun static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1159*4882a593Smuzhiyun 				 struct vmw_sw_context *sw_context,
1160*4882a593Smuzhiyun 				 SVGAMobId *id,
1161*4882a593Smuzhiyun 				 struct vmw_buffer_object **vmw_bo_p)
1162*4882a593Smuzhiyun {
1163*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1164*4882a593Smuzhiyun 	uint32_t handle = *id;
1165*4882a593Smuzhiyun 	struct vmw_relocation *reloc;
1166*4882a593Smuzhiyun 	int ret;
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun 	vmw_validation_preload_bo(sw_context->ctx);
1169*4882a593Smuzhiyun 	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1170*4882a593Smuzhiyun 	if (IS_ERR(vmw_bo)) {
1171*4882a593Smuzhiyun 		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1172*4882a593Smuzhiyun 		return PTR_ERR(vmw_bo);
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 
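	/*
	 * The noref lookup does not hold a reference; adding the buffer to
	 * the validation context keeps it alive for the rest of the batch.
	 */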
1175*4882a593Smuzhiyun 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1176*4882a593Smuzhiyun 	vmw_user_bo_noref_release();
1177*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1178*4882a593Smuzhiyun 		return ret;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1181*4882a593Smuzhiyun 	if (!reloc)
1182*4882a593Smuzhiyun 		return -ENOMEM;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	reloc->mob_loc = id;
1185*4882a593Smuzhiyun 	reloc->vbo = vmw_bo;
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	*vmw_bo_p = vmw_bo;
1188*4882a593Smuzhiyun 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	return 0;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun /**
1194*4882a593Smuzhiyun  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1195*4882a593Smuzhiyun  * to a valid SVGAGuestPtr
1196*4882a593Smuzhiyun  *
1197*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private structure.
1198*4882a593Smuzhiyun  * @sw_context: The software context used for this command batch validation.
1199*4882a593Smuzhiyun  * @ptr: Pointer to the user-space handle to be translated.
1200*4882a593Smuzhiyun  * @vmw_bo_p: Points to a location that, on successful return, will carry a
1201*4882a593Smuzhiyun  * non-reference-counted pointer to the DMA buffer identified by the user-space
1202*4882a593Smuzhiyun  * handle in @ptr.
1203*4882a593Smuzhiyun  *
1204*4882a593Smuzhiyun  * This function saves information needed to translate a user-space buffer
1205*4882a593Smuzhiyun  * handle to a valid SVGAGuestPtr. The translation does not take place
1206*4882a593Smuzhiyun  * immediately, but during a call to vmw_apply_relocations().
1207*4882a593Smuzhiyun  *
1208*4882a593Smuzhiyun  * This function builds a relocation list and a list of buffers to validate.
1209*4882a593Smuzhiyun  * The former needs to be freed using either vmw_apply_relocations() or
1210*4882a593Smuzhiyun  * vmw_free_relocations(). The latter needs to be freed using
1211*4882a593Smuzhiyun  * vmw_clear_validations().
1212*4882a593Smuzhiyun  */
1213*4882a593Smuzhiyun static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1214*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
1215*4882a593Smuzhiyun 				   SVGAGuestPtr *ptr,
1216*4882a593Smuzhiyun 				   struct vmw_buffer_object **vmw_bo_p)
1217*4882a593Smuzhiyun {
1218*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1219*4882a593Smuzhiyun 	uint32_t handle = ptr->gmrId;
1220*4882a593Smuzhiyun 	struct vmw_relocation *reloc;
1221*4882a593Smuzhiyun 	int ret;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	vmw_validation_preload_bo(sw_context->ctx);
1224*4882a593Smuzhiyun 	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1225*4882a593Smuzhiyun 	if (IS_ERR(vmw_bo)) {
1226*4882a593Smuzhiyun 		VMW_DEBUG_USER("Could not find or use GMR region.\n");
1227*4882a593Smuzhiyun 		return PTR_ERR(vmw_bo);
1228*4882a593Smuzhiyun 	}
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1231*4882a593Smuzhiyun 	vmw_user_bo_noref_release();
1232*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1233*4882a593Smuzhiyun 		return ret;
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1236*4882a593Smuzhiyun 	if (!reloc)
1237*4882a593Smuzhiyun 		return -ENOMEM;
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	reloc->location = ptr;
1240*4882a593Smuzhiyun 	reloc->vbo = vmw_bo;
1241*4882a593Smuzhiyun 	*vmw_bo_p = vmw_bo;
1242*4882a593Smuzhiyun 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	return 0;
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun /**
1248*4882a593Smuzhiyun  * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1249*4882a593Smuzhiyun  *
1250*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1251*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1252*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1253*4882a593Smuzhiyun  *
1254*4882a593Smuzhiyun  * This function adds the new query into the query COTABLE
1255*4882a593Smuzhiyun  * This function adds the new query into the query COTABLE.
1256*4882a593Smuzhiyun static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1257*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
1258*4882a593Smuzhiyun 				   SVGA3dCmdHeader *header)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1261*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1262*4882a593Smuzhiyun 	struct vmw_resource *cotable_res;
1263*4882a593Smuzhiyun 	int ret;
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	if (!ctx_node)
1266*4882a593Smuzhiyun 		return -EINVAL;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1269*4882a593Smuzhiyun 
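	/* Reject query types outside the range known to the device. */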
1270*4882a593Smuzhiyun 	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1271*4882a593Smuzhiyun 	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1272*4882a593Smuzhiyun 		return -EINVAL;
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1275*4882a593Smuzhiyun 	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	return ret;
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun /**
1281*4882a593Smuzhiyun  * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1282*4882a593Smuzhiyun  *
1283*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1284*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1285*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1286*4882a593Smuzhiyun  *
1287*4882a593Smuzhiyun  * The query bind operation will eventually associate the query ID with its
1288*4882a593Smuzhiyun  * backing MOB.  In this function, we take the user mode MOB ID and use
1289*4882a593Smuzhiyun  * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1290*4882a593Smuzhiyun  */
1291*4882a593Smuzhiyun static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1292*4882a593Smuzhiyun 				 struct vmw_sw_context *sw_context,
1293*4882a593Smuzhiyun 				 SVGA3dCmdHeader *header)
1294*4882a593Smuzhiyun {
1295*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1296*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1297*4882a593Smuzhiyun 	int ret;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	/*
1302*4882a593Smuzhiyun 	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1303*4882a593Smuzhiyun 	 * list so its kernel mode MOB ID can be filled in later.
1304*4882a593Smuzhiyun 	 */
1305*4882a593Smuzhiyun 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1306*4882a593Smuzhiyun 				    &vmw_bo);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	if (ret != 0)
1309*4882a593Smuzhiyun 		return ret;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	sw_context->dx_query_mob = vmw_bo;
1312*4882a593Smuzhiyun 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1313*4882a593Smuzhiyun 	return 0;
1314*4882a593Smuzhiyun }
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun /**
1317*4882a593Smuzhiyun  * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1318*4882a593Smuzhiyun  *
1319*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1320*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1321*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1322*4882a593Smuzhiyun  */
1323*4882a593Smuzhiyun static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1324*4882a593Smuzhiyun 				  struct vmw_sw_context *sw_context,
1325*4882a593Smuzhiyun 				  SVGA3dCmdHeader *header)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1328*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1331*4882a593Smuzhiyun 				 VMW_RES_DIRTY_SET, user_context_converter,
1332*4882a593Smuzhiyun 				 &cmd->body.cid, NULL);
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun /**
1336*4882a593Smuzhiyun  * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1337*4882a593Smuzhiyun  *
1338*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1339*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1340*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1341*4882a593Smuzhiyun  */
1342*4882a593Smuzhiyun static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1343*4882a593Smuzhiyun 			       struct vmw_sw_context *sw_context,
1344*4882a593Smuzhiyun 			       SVGA3dCmdHeader *header)
1345*4882a593Smuzhiyun {
1346*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1347*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	if (unlikely(dev_priv->has_mob)) {
1350*4882a593Smuzhiyun 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1351*4882a593Smuzhiyun 
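		/*
		 * On guest-backed devices, rewrite the legacy command in
		 * place as its GB equivalent; the BUG_ON below asserts that
		 * the two commands have the same size.
		 */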
1352*4882a593Smuzhiyun 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1355*4882a593Smuzhiyun 		gb_cmd.header.size = cmd->header.size;
1356*4882a593Smuzhiyun 		gb_cmd.body.cid = cmd->body.cid;
1357*4882a593Smuzhiyun 		gb_cmd.body.type = cmd->body.type;
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1360*4882a593Smuzhiyun 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1361*4882a593Smuzhiyun 	}
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1364*4882a593Smuzhiyun 				 VMW_RES_DIRTY_SET, user_context_converter,
1365*4882a593Smuzhiyun 				 &cmd->body.cid, NULL);
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun /**
1369*4882a593Smuzhiyun  * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1370*4882a593Smuzhiyun  *
1371*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1372*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1373*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1374*4882a593Smuzhiyun  */
1375*4882a593Smuzhiyun static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1376*4882a593Smuzhiyun 				struct vmw_sw_context *sw_context,
1377*4882a593Smuzhiyun 				SVGA3dCmdHeader *header)
1378*4882a593Smuzhiyun {
1379*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1380*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1381*4882a593Smuzhiyun 	int ret;
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1384*4882a593Smuzhiyun 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1385*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1386*4882a593Smuzhiyun 		return ret;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1389*4882a593Smuzhiyun 				    &vmw_bo);
1390*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1391*4882a593Smuzhiyun 		return ret;
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	return ret;
1396*4882a593Smuzhiyun }
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun /**
1399*4882a593Smuzhiyun  * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1400*4882a593Smuzhiyun  *
1401*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1402*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1403*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1404*4882a593Smuzhiyun  */
1405*4882a593Smuzhiyun static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1406*4882a593Smuzhiyun 			     struct vmw_sw_context *sw_context,
1407*4882a593Smuzhiyun 			     SVGA3dCmdHeader *header)
1408*4882a593Smuzhiyun {
1409*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1410*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1411*4882a593Smuzhiyun 	int ret;
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1414*4882a593Smuzhiyun 	if (dev_priv->has_mob) {
1415*4882a593Smuzhiyun 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1420*4882a593Smuzhiyun 		gb_cmd.header.size = cmd->header.size;
1421*4882a593Smuzhiyun 		gb_cmd.body.cid = cmd->body.cid;
1422*4882a593Smuzhiyun 		gb_cmd.body.type = cmd->body.type;
1423*4882a593Smuzhiyun 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1424*4882a593Smuzhiyun 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1427*4882a593Smuzhiyun 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1428*4882a593Smuzhiyun 	}
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1431*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1432*4882a593Smuzhiyun 		return ret;
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1435*4882a593Smuzhiyun 				      &cmd->body.guestResult, &vmw_bo);
1436*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1437*4882a593Smuzhiyun 		return ret;
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	return ret;
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun /**
1445*4882a593Smuzhiyun  * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1446*4882a593Smuzhiyun  *
1447*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1448*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1449*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1450*4882a593Smuzhiyun  */
1451*4882a593Smuzhiyun static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1452*4882a593Smuzhiyun 				 struct vmw_sw_context *sw_context,
1453*4882a593Smuzhiyun 				 SVGA3dCmdHeader *header)
1454*4882a593Smuzhiyun {
1455*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1456*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1457*4882a593Smuzhiyun 	int ret;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1460*4882a593Smuzhiyun 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1461*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1462*4882a593Smuzhiyun 		return ret;
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1465*4882a593Smuzhiyun 				    &vmw_bo);
1466*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1467*4882a593Smuzhiyun 		return ret;
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	return 0;
1470*4882a593Smuzhiyun }
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun /**
1473*4882a593Smuzhiyun  * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1474*4882a593Smuzhiyun  *
1475*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1476*4882a593Smuzhiyun  * @sw_context: The software context used for this command submission.
1477*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1478*4882a593Smuzhiyun  */
1479*4882a593Smuzhiyun static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1480*4882a593Smuzhiyun 			      struct vmw_sw_context *sw_context,
1481*4882a593Smuzhiyun 			      SVGA3dCmdHeader *header)
1482*4882a593Smuzhiyun {
1483*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1484*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1485*4882a593Smuzhiyun 	int ret;
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1488*4882a593Smuzhiyun 	if (dev_priv->has_mob) {
1489*4882a593Smuzhiyun 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1494*4882a593Smuzhiyun 		gb_cmd.header.size = cmd->header.size;
1495*4882a593Smuzhiyun 		gb_cmd.body.cid = cmd->body.cid;
1496*4882a593Smuzhiyun 		gb_cmd.body.type = cmd->body.type;
1497*4882a593Smuzhiyun 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1498*4882a593Smuzhiyun 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1501*4882a593Smuzhiyun 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1502*4882a593Smuzhiyun 	}
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1505*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1506*4882a593Smuzhiyun 		return ret;
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1509*4882a593Smuzhiyun 				      &cmd->body.guestResult, &vmw_bo);
1510*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1511*4882a593Smuzhiyun 		return ret;
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	return 0;
1514*4882a593Smuzhiyun }
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun static int vmw_cmd_dma(struct vmw_private *dev_priv,
1517*4882a593Smuzhiyun 		       struct vmw_sw_context *sw_context,
1518*4882a593Smuzhiyun 		       SVGA3dCmdHeader *header)
1519*4882a593Smuzhiyun {
1520*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo = NULL;
1521*4882a593Smuzhiyun 	struct vmw_surface *srf = NULL;
1522*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1523*4882a593Smuzhiyun 	int ret;
1524*4882a593Smuzhiyun 	SVGA3dCmdSurfaceDMASuffix *suffix;
1525*4882a593Smuzhiyun 	uint32_t bo_size;
1526*4882a593Smuzhiyun 	bool dirty;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1529*4882a593Smuzhiyun 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1530*4882a593Smuzhiyun 					       header->size - sizeof(*suffix));
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	/* Make sure the device and the verifier stay in sync. */
1533*4882a593Smuzhiyun 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1534*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1535*4882a593Smuzhiyun 		return -EINVAL;
1536*4882a593Smuzhiyun 	}
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1539*4882a593Smuzhiyun 				      &cmd->body.guest.ptr, &vmw_bo);
1540*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1541*4882a593Smuzhiyun 		return ret;
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	/* Make sure DMA doesn't cross BO boundaries. */
1544*4882a593Smuzhiyun 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1545*4882a593Smuzhiyun 	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1546*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid DMA offset.\n");
1547*4882a593Smuzhiyun 		return -EINVAL;
1548*4882a593Smuzhiyun 	}
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	bo_size -= cmd->body.guest.ptr.offset;
1551*4882a593Smuzhiyun 	if (unlikely(suffix->maximumOffset > bo_size))
1552*4882a593Smuzhiyun 		suffix->maximumOffset = bo_size;
1553*4882a593Smuzhiyun 
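	/* Only mark the surface dirty when the DMA transfers data into it. */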
1554*4882a593Smuzhiyun 	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1555*4882a593Smuzhiyun 		VMW_RES_DIRTY_SET : 0;
1556*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1557*4882a593Smuzhiyun 				dirty, user_surface_converter,
1558*4882a593Smuzhiyun 				&cmd->body.host.sid, NULL);
1559*4882a593Smuzhiyun 	if (unlikely(ret != 0)) {
1560*4882a593Smuzhiyun 		if (unlikely(ret != -ERESTARTSYS))
1561*4882a593Smuzhiyun 			VMW_DEBUG_USER("could not find surface for DMA.\n");
1562*4882a593Smuzhiyun 		return ret;
1563*4882a593Smuzhiyun 	}
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	return 0;
1570*4882a593Smuzhiyun }
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun static int vmw_cmd_draw(struct vmw_private *dev_priv,
1573*4882a593Smuzhiyun 			struct vmw_sw_context *sw_context,
1574*4882a593Smuzhiyun 			SVGA3dCmdHeader *header)
1575*4882a593Smuzhiyun {
1576*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1577*4882a593Smuzhiyun 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1578*4882a593Smuzhiyun 		(unsigned long)header + sizeof(*cmd));
1579*4882a593Smuzhiyun 	SVGA3dPrimitiveRange *range;
1580*4882a593Smuzhiyun 	uint32_t i;
1581*4882a593Smuzhiyun 	uint32_t maxnum;
1582*4882a593Smuzhiyun 	int ret;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1585*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1586*4882a593Smuzhiyun 		return ret;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1589*4882a593Smuzhiyun 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1592*4882a593Smuzhiyun 		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1593*4882a593Smuzhiyun 		return -EINVAL;
1594*4882a593Smuzhiyun 	}
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1597*4882a593Smuzhiyun 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1598*4882a593Smuzhiyun 					VMW_RES_DIRTY_NONE,
1599*4882a593Smuzhiyun 					user_surface_converter,
1600*4882a593Smuzhiyun 					&decl->array.surfaceId, NULL);
1601*4882a593Smuzhiyun 		if (unlikely(ret != 0))
1602*4882a593Smuzhiyun 			return ret;
1603*4882a593Smuzhiyun 	}
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	maxnum = (header->size - sizeof(cmd->body) -
1606*4882a593Smuzhiyun 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1607*4882a593Smuzhiyun 	if (unlikely(cmd->body.numRanges > maxnum)) {
1608*4882a593Smuzhiyun 		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1609*4882a593Smuzhiyun 		return -EINVAL;
1610*4882a593Smuzhiyun 	}
1611*4882a593Smuzhiyun 
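	/* The index ranges immediately follow the vertex declarations. */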
1612*4882a593Smuzhiyun 	range = (SVGA3dPrimitiveRange *) decl;
1613*4882a593Smuzhiyun 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1614*4882a593Smuzhiyun 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1615*4882a593Smuzhiyun 					VMW_RES_DIRTY_NONE,
1616*4882a593Smuzhiyun 					user_surface_converter,
1617*4882a593Smuzhiyun 					&range->indexArray.surfaceId, NULL);
1618*4882a593Smuzhiyun 		if (unlikely(ret != 0))
1619*4882a593Smuzhiyun 			return ret;
1620*4882a593Smuzhiyun 	}
1621*4882a593Smuzhiyun 	return 0;
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1625*4882a593Smuzhiyun 			     struct vmw_sw_context *sw_context,
1626*4882a593Smuzhiyun 			     SVGA3dCmdHeader *header)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1629*4882a593Smuzhiyun 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1630*4882a593Smuzhiyun 	  ((unsigned long) header + header->size + sizeof(header));
1631*4882a593Smuzhiyun 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1632*4882a593Smuzhiyun 		((unsigned long) header + sizeof(*cmd));
1633*4882a593Smuzhiyun 	struct vmw_resource *ctx;
1634*4882a593Smuzhiyun 	struct vmw_resource *res;
1635*4882a593Smuzhiyun 	int ret;
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1640*4882a593Smuzhiyun 				VMW_RES_DIRTY_SET, user_context_converter,
1641*4882a593Smuzhiyun 				&cmd->body.cid, &ctx);
1642*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1643*4882a593Smuzhiyun 		return ret;
1644*4882a593Smuzhiyun 
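	/*
	 * Walk the variable-length texture state list that follows the
	 * command body; only texture bindings reference a surface.
	 */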
1645*4882a593Smuzhiyun 	for (; cur_state < last_state; ++cur_state) {
1646*4882a593Smuzhiyun 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1647*4882a593Smuzhiyun 			continue;
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1650*4882a593Smuzhiyun 			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1651*4882a593Smuzhiyun 				       (unsigned int) cur_state->stage);
1652*4882a593Smuzhiyun 			return -EINVAL;
1653*4882a593Smuzhiyun 		}
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1656*4882a593Smuzhiyun 					VMW_RES_DIRTY_NONE,
1657*4882a593Smuzhiyun 					user_surface_converter,
1658*4882a593Smuzhiyun 					&cur_state->value, &res);
1659*4882a593Smuzhiyun 		if (unlikely(ret != 0))
1660*4882a593Smuzhiyun 			return ret;
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun 		if (dev_priv->has_mob) {
1663*4882a593Smuzhiyun 			struct vmw_ctx_bindinfo_tex binding;
1664*4882a593Smuzhiyun 			struct vmw_ctx_validation_info *node;
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 			node = vmw_execbuf_info_from_res(sw_context, ctx);
1667*4882a593Smuzhiyun 			if (!node)
1668*4882a593Smuzhiyun 				return -EINVAL;
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 			binding.bi.ctx = ctx;
1671*4882a593Smuzhiyun 			binding.bi.res = res;
1672*4882a593Smuzhiyun 			binding.bi.bt = vmw_ctx_binding_tex;
1673*4882a593Smuzhiyun 			binding.texture_stage = cur_state->stage;
1674*4882a593Smuzhiyun 			vmw_binding_add(node->staged, &binding.bi, 0,
1675*4882a593Smuzhiyun 					binding.texture_stage);
1676*4882a593Smuzhiyun 		}
1677*4882a593Smuzhiyun 	}
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	return 0;
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun 
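/**
 * vmw_cmd_check_define_gmrfb - Validate SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 */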
1682*4882a593Smuzhiyun static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1683*4882a593Smuzhiyun 				      struct vmw_sw_context *sw_context,
1684*4882a593Smuzhiyun 				      void *buf)
1685*4882a593Smuzhiyun {
1686*4882a593Smuzhiyun 	struct vmw_buffer_object *vmw_bo;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 	struct {
1689*4882a593Smuzhiyun 		uint32_t header;
1690*4882a593Smuzhiyun 		SVGAFifoCmdDefineGMRFB body;
1691*4882a593Smuzhiyun 	} *cmd = buf;
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1694*4882a593Smuzhiyun 				       &vmw_bo);
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun /**
1698*4882a593Smuzhiyun  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1699*4882a593Smuzhiyun  * switching
1700*4882a593Smuzhiyun  *
1701*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1702*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1703*4882a593Smuzhiyun  * @res: Pointer to the resource whose backup buffer is being switched.
1704*4882a593Smuzhiyun  * @buf_id: Pointer to the user-space backup buffer handle in the command
1705*4882a593Smuzhiyun  * stream.
1706*4882a593Smuzhiyun  * @backup_offset: Offset of backup into MOB.
1707*4882a593Smuzhiyun  *
1708*4882a593Smuzhiyun  * This function prepares for registering a switch of backup buffers in the
1709*4882a593Smuzhiyun  * resource metadata just prior to unreserving.
1711*4882a593Smuzhiyun  */
1712*4882a593Smuzhiyun static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1713*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context,
1714*4882a593Smuzhiyun 				     struct vmw_resource *res, uint32_t *buf_id,
1715*4882a593Smuzhiyun 				     unsigned long backup_offset)
1716*4882a593Smuzhiyun {
1717*4882a593Smuzhiyun 	struct vmw_buffer_object *vbo;
1718*4882a593Smuzhiyun 	void *info;
1719*4882a593Smuzhiyun 	int ret;
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	info = vmw_execbuf_info_from_res(sw_context, res);
1722*4882a593Smuzhiyun 	if (!info)
1723*4882a593Smuzhiyun 		return -EINVAL;
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1726*4882a593Smuzhiyun 	if (ret)
1727*4882a593Smuzhiyun 		return ret;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1730*4882a593Smuzhiyun 					 backup_offset);
1731*4882a593Smuzhiyun 	return 0;
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun /**
1735*4882a593Smuzhiyun  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1736*4882a593Smuzhiyun  *
1737*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1738*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1739*4882a593Smuzhiyun  * @res_type: The resource type.
1740*4882a593Smuzhiyun  * @converter: Information about user-space binding for this resource type.
1741*4882a593Smuzhiyun  * @res_id: Pointer to the user-space resource handle in the command stream.
1742*4882a593Smuzhiyun  * @buf_id: Pointer to the user-space backup buffer handle in the command
1743*4882a593Smuzhiyun  * stream.
1744*4882a593Smuzhiyun  * @backup_offset: Offset of backup into MOB.
1745*4882a593Smuzhiyun  *
1746*4882a593Smuzhiyun  * This function prepares for registering a switch of backup buffers in the
1747*4882a593Smuzhiyun  * resource metadata just prior to unreserving. It's basically a wrapper around
1748*4882a593Smuzhiyun  * vmw_cmd_res_switch_backup with a different interface.
1749*4882a593Smuzhiyun  */
1750*4882a593Smuzhiyun static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1751*4882a593Smuzhiyun 				 struct vmw_sw_context *sw_context,
1752*4882a593Smuzhiyun 				 enum vmw_res_type res_type,
1753*4882a593Smuzhiyun 				 const struct vmw_user_resource_conv
1754*4882a593Smuzhiyun 				 *converter, uint32_t *res_id, uint32_t *buf_id,
1755*4882a593Smuzhiyun 				 unsigned long backup_offset)
1756*4882a593Smuzhiyun {
1757*4882a593Smuzhiyun 	struct vmw_resource *res;
1758*4882a593Smuzhiyun 	int ret;
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1761*4882a593Smuzhiyun 				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1762*4882a593Smuzhiyun 	if (ret)
1763*4882a593Smuzhiyun 		return ret;
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1766*4882a593Smuzhiyun 					 backup_offset);
1767*4882a593Smuzhiyun }
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun /**
1770*4882a593Smuzhiyun  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1771*4882a593Smuzhiyun  *
1772*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1773*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1774*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1775*4882a593Smuzhiyun  */
1776*4882a593Smuzhiyun static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1777*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
1778*4882a593Smuzhiyun 				   SVGA3dCmdHeader *header)
1779*4882a593Smuzhiyun {
1780*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1781*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1784*4882a593Smuzhiyun 				     user_surface_converter, &cmd->body.sid,
1785*4882a593Smuzhiyun 				     &cmd->body.mobid, 0);
1786*4882a593Smuzhiyun }
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun /**
1789*4882a593Smuzhiyun  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1790*4882a593Smuzhiyun  *
1791*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1792*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1793*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1794*4882a593Smuzhiyun  */
1795*4882a593Smuzhiyun static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1796*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
1797*4882a593Smuzhiyun 				   SVGA3dCmdHeader *header)
1798*4882a593Smuzhiyun {
1799*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1800*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1803*4882a593Smuzhiyun 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1804*4882a593Smuzhiyun 				 &cmd->body.image.sid, NULL);
1805*4882a593Smuzhiyun }
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun /**
1808*4882a593Smuzhiyun  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1809*4882a593Smuzhiyun  *
1810*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1811*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1812*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1813*4882a593Smuzhiyun  */
1814*4882a593Smuzhiyun static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1815*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context,
1816*4882a593Smuzhiyun 				     SVGA3dCmdHeader *header)
1817*4882a593Smuzhiyun {
1818*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1819*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1822*4882a593Smuzhiyun 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1823*4882a593Smuzhiyun 				 &cmd->body.sid, NULL);
1824*4882a593Smuzhiyun }
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun /**
1827*4882a593Smuzhiyun  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1828*4882a593Smuzhiyun  *
1829*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1830*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1831*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1832*4882a593Smuzhiyun  */
1833*4882a593Smuzhiyun static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1834*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context,
1835*4882a593Smuzhiyun 				     SVGA3dCmdHeader *header)
1836*4882a593Smuzhiyun {
1837*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1838*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1841*4882a593Smuzhiyun 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1842*4882a593Smuzhiyun 				 &cmd->body.image.sid, NULL);
1843*4882a593Smuzhiyun }
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun /**
1846*4882a593Smuzhiyun  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1847*4882a593Smuzhiyun  * command
1848*4882a593Smuzhiyun  *
1849*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1850*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1851*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1852*4882a593Smuzhiyun  */
1853*4882a593Smuzhiyun static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1854*4882a593Smuzhiyun 				       struct vmw_sw_context *sw_context,
1855*4882a593Smuzhiyun 				       SVGA3dCmdHeader *header)
1856*4882a593Smuzhiyun {
1857*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1858*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1861*4882a593Smuzhiyun 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1862*4882a593Smuzhiyun 				 &cmd->body.sid, NULL);
1863*4882a593Smuzhiyun }
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun /**
1866*4882a593Smuzhiyun  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1867*4882a593Smuzhiyun  * command
1868*4882a593Smuzhiyun  *
1869*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1870*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1871*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1872*4882a593Smuzhiyun  */
1873*4882a593Smuzhiyun static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1874*4882a593Smuzhiyun 				       struct vmw_sw_context *sw_context,
1875*4882a593Smuzhiyun 				       SVGA3dCmdHeader *header)
1876*4882a593Smuzhiyun {
1877*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1878*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1881*4882a593Smuzhiyun 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1882*4882a593Smuzhiyun 				 &cmd->body.image.sid, NULL);
1883*4882a593Smuzhiyun }
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun /**
1886*4882a593Smuzhiyun  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1887*4882a593Smuzhiyun  * command
1888*4882a593Smuzhiyun  *
1889*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1890*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1891*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1892*4882a593Smuzhiyun  */
1893*4882a593Smuzhiyun static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1894*4882a593Smuzhiyun 					 struct vmw_sw_context *sw_context,
1895*4882a593Smuzhiyun 					 SVGA3dCmdHeader *header)
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1898*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1901*4882a593Smuzhiyun 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1902*4882a593Smuzhiyun 				 &cmd->body.sid, NULL);
1903*4882a593Smuzhiyun }
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun /**
1906*4882a593Smuzhiyun  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1907*4882a593Smuzhiyun  *
1908*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1909*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1910*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1911*4882a593Smuzhiyun  */
1912*4882a593Smuzhiyun static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1913*4882a593Smuzhiyun 				 struct vmw_sw_context *sw_context,
1914*4882a593Smuzhiyun 				 SVGA3dCmdHeader *header)
1915*4882a593Smuzhiyun {
1916*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1917*4882a593Smuzhiyun 	int ret;
1918*4882a593Smuzhiyun 	size_t size;
1919*4882a593Smuzhiyun 	struct vmw_resource *ctx;
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1924*4882a593Smuzhiyun 				VMW_RES_DIRTY_SET, user_context_converter,
1925*4882a593Smuzhiyun 				&cmd->body.cid, &ctx);
1926*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1927*4882a593Smuzhiyun 		return ret;
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 	if (unlikely(!dev_priv->has_mob))
1930*4882a593Smuzhiyun 		return 0;
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 	size = cmd->header.size - sizeof(cmd->body);
1933*4882a593Smuzhiyun 	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1934*4882a593Smuzhiyun 				    cmd->body.shid, cmd + 1, cmd->body.type,
1935*4882a593Smuzhiyun 				    size, &sw_context->staged_cmd_res);
1936*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1937*4882a593Smuzhiyun 		return ret;
1938*4882a593Smuzhiyun 
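	/*
	 * The compat shader is managed by the kernel, so replace the
	 * device-visible command with a NOP via a relocation.
	 */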
1939*4882a593Smuzhiyun 	return vmw_resource_relocation_add(sw_context, NULL,
1940*4882a593Smuzhiyun 					   vmw_ptr_diff(sw_context->buf_start,
1941*4882a593Smuzhiyun 							&cmd->header.id),
1942*4882a593Smuzhiyun 					   vmw_res_rel_nop);
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun /**
1946*4882a593Smuzhiyun  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1947*4882a593Smuzhiyun  *
1948*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1949*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1950*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1951*4882a593Smuzhiyun  */
1952*4882a593Smuzhiyun static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1953*4882a593Smuzhiyun 				  struct vmw_sw_context *sw_context,
1954*4882a593Smuzhiyun 				  SVGA3dCmdHeader *header)
1955*4882a593Smuzhiyun {
1956*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1957*4882a593Smuzhiyun 	int ret;
1958*4882a593Smuzhiyun 	struct vmw_resource *ctx;
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1963*4882a593Smuzhiyun 				VMW_RES_DIRTY_SET, user_context_converter,
1964*4882a593Smuzhiyun 				&cmd->body.cid, &ctx);
1965*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1966*4882a593Smuzhiyun 		return ret;
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	if (unlikely(!dev_priv->has_mob))
1969*4882a593Smuzhiyun 		return 0;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1972*4882a593Smuzhiyun 				cmd->body.type, &sw_context->staged_cmd_res);
1973*4882a593Smuzhiyun 	if (unlikely(ret != 0))
1974*4882a593Smuzhiyun 		return ret;
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	return vmw_resource_relocation_add(sw_context, NULL,
1977*4882a593Smuzhiyun 					   vmw_ptr_diff(sw_context->buf_start,
1978*4882a593Smuzhiyun 							&cmd->header.id),
1979*4882a593Smuzhiyun 					   vmw_res_rel_nop);
1980*4882a593Smuzhiyun }
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun /**
1983*4882a593Smuzhiyun  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1984*4882a593Smuzhiyun  *
1985*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
1986*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
1987*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
1988*4882a593Smuzhiyun  */
1989*4882a593Smuzhiyun static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1990*4882a593Smuzhiyun 			      struct vmw_sw_context *sw_context,
1991*4882a593Smuzhiyun 			      SVGA3dCmdHeader *header)
1992*4882a593Smuzhiyun {
1993*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1994*4882a593Smuzhiyun 	struct vmw_ctx_bindinfo_shader binding;
1995*4882a593Smuzhiyun 	struct vmw_resource *ctx, *res = NULL;
1996*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_info;
1997*4882a593Smuzhiyun 	int ret;
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2002*4882a593Smuzhiyun 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2003*4882a593Smuzhiyun 			       (unsigned int) cmd->body.type);
2004*4882a593Smuzhiyun 		return -EINVAL;
2005*4882a593Smuzhiyun 	}
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2008*4882a593Smuzhiyun 				VMW_RES_DIRTY_SET, user_context_converter,
2009*4882a593Smuzhiyun 				&cmd->body.cid, &ctx);
2010*4882a593Smuzhiyun 	if (unlikely(ret != 0))
2011*4882a593Smuzhiyun 		return ret;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	if (!dev_priv->has_mob)
2014*4882a593Smuzhiyun 		return 0;
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2017*4882a593Smuzhiyun 		/*
2018*4882a593Smuzhiyun 		 * This is the compat shader path - per-device guest-backed
2019*4882a593Smuzhiyun 		 * shaders, but user-space thinks they are per-context host-
2020*4882a593Smuzhiyun 		 * backed shaders.
2021*4882a593Smuzhiyun 		 */
2022*4882a593Smuzhiyun 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2023*4882a593Smuzhiyun 					cmd->body.shid, cmd->body.type);
2024*4882a593Smuzhiyun 		if (!IS_ERR(res)) {
2025*4882a593Smuzhiyun 			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2026*4882a593Smuzhiyun 							    VMW_RES_DIRTY_NONE);
2027*4882a593Smuzhiyun 			if (unlikely(ret != 0))
2028*4882a593Smuzhiyun 				return ret;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 			ret = vmw_resource_relocation_add
2031*4882a593Smuzhiyun 				(sw_context, res,
2032*4882a593Smuzhiyun 				 vmw_ptr_diff(sw_context->buf_start,
2033*4882a593Smuzhiyun 					      &cmd->body.shid),
2034*4882a593Smuzhiyun 				 vmw_res_rel_normal);
2035*4882a593Smuzhiyun 			if (unlikely(ret != 0))
2036*4882a593Smuzhiyun 				return ret;
2037*4882a593Smuzhiyun 		}
2038*4882a593Smuzhiyun 	}
2039*4882a593Smuzhiyun 
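	/*
	 * No compat shader was found above; fall back to validating a
	 * regular user-space shader resource.
	 */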
2040*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(res)) {
2041*4882a593Smuzhiyun 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2042*4882a593Smuzhiyun 					VMW_RES_DIRTY_NONE,
2043*4882a593Smuzhiyun 					user_shader_converter, &cmd->body.shid,
2044*4882a593Smuzhiyun 					&res);
2045*4882a593Smuzhiyun 		if (unlikely(ret != 0))
2046*4882a593Smuzhiyun 			return ret;
2047*4882a593Smuzhiyun 	}
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2050*4882a593Smuzhiyun 	if (!ctx_info)
2051*4882a593Smuzhiyun 		return -EINVAL;
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 	binding.bi.ctx = ctx;
2054*4882a593Smuzhiyun 	binding.bi.res = res;
2055*4882a593Smuzhiyun 	binding.bi.bt = vmw_ctx_binding_shader;
2056*4882a593Smuzhiyun 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2057*4882a593Smuzhiyun 	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	return 0;
2060*4882a593Smuzhiyun }
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun /**
2063*4882a593Smuzhiyun  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2064*4882a593Smuzhiyun  *
2065*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2066*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2067*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2068*4882a593Smuzhiyun  */
2069*4882a593Smuzhiyun static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2070*4882a593Smuzhiyun 				    struct vmw_sw_context *sw_context,
2071*4882a593Smuzhiyun 				    SVGA3dCmdHeader *header)
2072*4882a593Smuzhiyun {
2073*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2074*4882a593Smuzhiyun 	int ret;
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2079*4882a593Smuzhiyun 				VMW_RES_DIRTY_SET, user_context_converter,
2080*4882a593Smuzhiyun 				&cmd->body.cid, NULL);
2081*4882a593Smuzhiyun 	if (unlikely(ret != 0))
2082*4882a593Smuzhiyun 		return ret;
2083*4882a593Smuzhiyun 
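	/* On guest-backed devices, patch the legacy command id in place. */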
2084*4882a593Smuzhiyun 	if (dev_priv->has_mob)
2085*4882a593Smuzhiyun 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	return 0;
2088*4882a593Smuzhiyun }
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun /**
2091*4882a593Smuzhiyun  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2092*4882a593Smuzhiyun  *
2093*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2094*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2095*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2096*4882a593Smuzhiyun  */
2097*4882a593Smuzhiyun static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2098*4882a593Smuzhiyun 				  struct vmw_sw_context *sw_context,
2099*4882a593Smuzhiyun 				  SVGA3dCmdHeader *header)
2100*4882a593Smuzhiyun {
2101*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2102*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2105*4882a593Smuzhiyun 				     user_shader_converter, &cmd->body.shid,
2106*4882a593Smuzhiyun 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2107*4882a593Smuzhiyun }
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun /**
2110*4882a593Smuzhiyun  * vmw_cmd_dx_set_single_constant_buffer - Validate
2111*4882a593Smuzhiyun  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2112*4882a593Smuzhiyun  *
2113*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2114*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2115*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2116*4882a593Smuzhiyun  */
2117*4882a593Smuzhiyun static int
2118*4882a593Smuzhiyun vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2119*4882a593Smuzhiyun 				      struct vmw_sw_context *sw_context,
2120*4882a593Smuzhiyun 				      SVGA3dCmdHeader *header)
2121*4882a593Smuzhiyun {
2122*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2123*4882a593Smuzhiyun 	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
2124*4882a593Smuzhiyun 		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	struct vmw_resource *res = NULL;
2127*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2128*4882a593Smuzhiyun 	struct vmw_ctx_bindinfo_cb binding;
2129*4882a593Smuzhiyun 	int ret;
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	if (!ctx_node)
2132*4882a593Smuzhiyun 		return -EINVAL;
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2135*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2136*4882a593Smuzhiyun 				VMW_RES_DIRTY_NONE, user_surface_converter,
2137*4882a593Smuzhiyun 				&cmd->body.sid, &res);
2138*4882a593Smuzhiyun 	if (unlikely(ret != 0))
2139*4882a593Smuzhiyun 		return ret;
2140*4882a593Smuzhiyun 
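	/*
	 * Set up the staged binding; it is range-checked below before being
	 * added to the context's staged binding state.
	 */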
2141*4882a593Smuzhiyun 	binding.bi.ctx = ctx_node->ctx;
2142*4882a593Smuzhiyun 	binding.bi.res = res;
2143*4882a593Smuzhiyun 	binding.bi.bt = vmw_ctx_binding_cb;
2144*4882a593Smuzhiyun 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2145*4882a593Smuzhiyun 	binding.offset = cmd->body.offsetInBytes;
2146*4882a593Smuzhiyun 	binding.size = cmd->body.sizeInBytes;
2147*4882a593Smuzhiyun 	binding.slot = cmd->body.slot;
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	if (binding.shader_slot >= max_shader_num ||
2150*4882a593Smuzhiyun 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2151*4882a593Smuzhiyun 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2152*4882a593Smuzhiyun 			       (unsigned int) cmd->body.type,
2153*4882a593Smuzhiyun 			       (unsigned int) binding.slot);
2154*4882a593Smuzhiyun 		return -EINVAL;
2155*4882a593Smuzhiyun 	}
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2158*4882a593Smuzhiyun 			binding.slot);
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	return 0;
2161*4882a593Smuzhiyun }
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun /**
2164*4882a593Smuzhiyun  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2165*4882a593Smuzhiyun  * command
2166*4882a593Smuzhiyun  *
2167*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2168*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2169*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2170*4882a593Smuzhiyun  */
2171*4882a593Smuzhiyun static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2172*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context,
2173*4882a593Smuzhiyun 				     SVGA3dCmdHeader *header)
2174*4882a593Smuzhiyun {
2175*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2176*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2177*4882a593Smuzhiyun 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2178*4882a593Smuzhiyun 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2181*4882a593Smuzhiyun 		sizeof(SVGA3dShaderResourceViewId);
2182*4882a593Smuzhiyun 
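	/*
	 * Do the range check in 64 bits so that startView + num_sr_view
	 * cannot wrap around.
	 */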
2183*4882a593Smuzhiyun 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2184*4882a593Smuzhiyun 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2185*4882a593Smuzhiyun 	    cmd->body.type >= max_allowed) {
2186*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid shader binding.\n");
2187*4882a593Smuzhiyun 		return -EINVAL;
2188*4882a593Smuzhiyun 	}
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2191*4882a593Smuzhiyun 				     vmw_ctx_binding_sr,
2192*4882a593Smuzhiyun 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2193*4882a593Smuzhiyun 				     (void *) &cmd[1], num_sr_view,
2194*4882a593Smuzhiyun 				     cmd->body.startView);
2195*4882a593Smuzhiyun }
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun /**
2198*4882a593Smuzhiyun  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2199*4882a593Smuzhiyun  *
2200*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2201*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2202*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2203*4882a593Smuzhiyun  */
2204*4882a593Smuzhiyun static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2205*4882a593Smuzhiyun 				 struct vmw_sw_context *sw_context,
2206*4882a593Smuzhiyun 				 SVGA3dCmdHeader *header)
2207*4882a593Smuzhiyun {
2208*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2209*4882a593Smuzhiyun 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2210*4882a593Smuzhiyun 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2211*4882a593Smuzhiyun 	struct vmw_resource *res = NULL;
2212*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2213*4882a593Smuzhiyun 	struct vmw_ctx_bindinfo_shader binding;
2214*4882a593Smuzhiyun 	int ret = 0;
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	if (!ctx_node)
2217*4882a593Smuzhiyun 		return -EINVAL;
2218*4882a593Smuzhiyun 
2219*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 	if (cmd->body.type >= max_allowed ||
2222*4882a593Smuzhiyun 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2223*4882a593Smuzhiyun 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2224*4882a593Smuzhiyun 			       (unsigned int) cmd->body.type);
2225*4882a593Smuzhiyun 		return -EINVAL;
2226*4882a593Smuzhiyun 	}
2227*4882a593Smuzhiyun 
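	/*
	 * SVGA3D_INVALID_ID unbinds the slot; otherwise look up the DX
	 * shader and add it to the validation list.
	 */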
2228*4882a593Smuzhiyun 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2229*4882a593Smuzhiyun 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2230*4882a593Smuzhiyun 		if (IS_ERR(res)) {
2231*4882a593Smuzhiyun 			VMW_DEBUG_USER("Could not find shader for binding.\n");
2232*4882a593Smuzhiyun 			return PTR_ERR(res);
2233*4882a593Smuzhiyun 		}
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2236*4882a593Smuzhiyun 						    VMW_RES_DIRTY_NONE);
2237*4882a593Smuzhiyun 		if (ret)
2238*4882a593Smuzhiyun 			return ret;
2239*4882a593Smuzhiyun 	}
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	binding.bi.ctx = ctx_node->ctx;
2242*4882a593Smuzhiyun 	binding.bi.res = res;
2243*4882a593Smuzhiyun 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2244*4882a593Smuzhiyun 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	return 0;
2249*4882a593Smuzhiyun }
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun /**
2252*4882a593Smuzhiyun  * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2253*4882a593Smuzhiyun  * command
2254*4882a593Smuzhiyun  *
2255*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2256*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2257*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2258*4882a593Smuzhiyun  */
2259*4882a593Smuzhiyun static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2260*4882a593Smuzhiyun 					 struct vmw_sw_context *sw_context,
2261*4882a593Smuzhiyun 					 SVGA3dCmdHeader *header)
2262*4882a593Smuzhiyun {
2263*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2264*4882a593Smuzhiyun 	struct vmw_ctx_bindinfo_vb binding;
2265*4882a593Smuzhiyun 	struct vmw_resource *res;
2266*4882a593Smuzhiyun 	struct {
2267*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2268*4882a593Smuzhiyun 		SVGA3dCmdDXSetVertexBuffers body;
2269*4882a593Smuzhiyun 		SVGA3dVertexBuffer buf[];
2270*4882a593Smuzhiyun 	} *cmd;
2271*4882a593Smuzhiyun 	int i, ret, num;
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	if (!ctx_node)
2274*4882a593Smuzhiyun 		return -EINVAL;
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
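	/* The number of vertex-buffer entries is implied by the command size. */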
2277*4882a593Smuzhiyun 	num = (cmd->header.size - sizeof(cmd->body)) /
2278*4882a593Smuzhiyun 		sizeof(SVGA3dVertexBuffer);
2279*4882a593Smuzhiyun 	if ((u64)num + (u64)cmd->body.startBuffer >
2280*4882a593Smuzhiyun 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2281*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2282*4882a593Smuzhiyun 		return -EINVAL;
2283*4882a593Smuzhiyun 	}
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	for (i = 0; i < num; i++) {
2286*4882a593Smuzhiyun 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2287*4882a593Smuzhiyun 					VMW_RES_DIRTY_NONE,
2288*4882a593Smuzhiyun 					user_surface_converter,
2289*4882a593Smuzhiyun 					&cmd->buf[i].sid, &res);
2290*4882a593Smuzhiyun 		if (unlikely(ret != 0))
2291*4882a593Smuzhiyun 			return ret;
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 		binding.bi.ctx = ctx_node->ctx;
2294*4882a593Smuzhiyun 		binding.bi.bt = vmw_ctx_binding_vb;
2295*4882a593Smuzhiyun 		binding.bi.res = res;
2296*4882a593Smuzhiyun 		binding.offset = cmd->buf[i].offset;
2297*4882a593Smuzhiyun 		binding.stride = cmd->buf[i].stride;
2298*4882a593Smuzhiyun 		binding.slot = i + cmd->body.startBuffer;
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2301*4882a593Smuzhiyun 	}
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	return 0;
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun /**
2307*4882a593Smuzhiyun  * vmw_cmd_dx_set_index_buffer - Validate
2308*4882a593Smuzhiyun  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2309*4882a593Smuzhiyun  *
2310*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2311*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2312*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2313*4882a593Smuzhiyun  */
2314*4882a593Smuzhiyun static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2315*4882a593Smuzhiyun 				       struct vmw_sw_context *sw_context,
2316*4882a593Smuzhiyun 				       SVGA3dCmdHeader *header)
2317*4882a593Smuzhiyun {
2318*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2319*4882a593Smuzhiyun 	struct vmw_ctx_bindinfo_ib binding;
2320*4882a593Smuzhiyun 	struct vmw_resource *res;
2321*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2322*4882a593Smuzhiyun 	int ret;
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun 	if (!ctx_node)
2325*4882a593Smuzhiyun 		return -EINVAL;
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2328*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2329*4882a593Smuzhiyun 				VMW_RES_DIRTY_NONE, user_surface_converter,
2330*4882a593Smuzhiyun 				&cmd->body.sid, &res);
2331*4882a593Smuzhiyun 	if (unlikely(ret != 0))
2332*4882a593Smuzhiyun 		return ret;
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun 	binding.bi.ctx = ctx_node->ctx;
2335*4882a593Smuzhiyun 	binding.bi.res = res;
2336*4882a593Smuzhiyun 	binding.bi.bt = vmw_ctx_binding_ib;
2337*4882a593Smuzhiyun 	binding.offset = cmd->body.offset;
2338*4882a593Smuzhiyun 	binding.format = cmd->body.format;
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2341*4882a593Smuzhiyun 
2342*4882a593Smuzhiyun 	return 0;
2343*4882a593Smuzhiyun }
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun /**
2346*4882a593Smuzhiyun  * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2347*4882a593Smuzhiyun  * command
2348*4882a593Smuzhiyun  *
2349*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2350*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2351*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2352*4882a593Smuzhiyun  */
2353*4882a593Smuzhiyun static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2354*4882a593Smuzhiyun 					struct vmw_sw_context *sw_context,
2355*4882a593Smuzhiyun 					SVGA3dCmdHeader *header)
2356*4882a593Smuzhiyun {
2357*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2358*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2359*4882a593Smuzhiyun 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2360*4882a593Smuzhiyun 		sizeof(SVGA3dRenderTargetViewId);
2361*4882a593Smuzhiyun 	int ret;
2362*4882a593Smuzhiyun 
2363*4882a593Smuzhiyun 	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2364*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2365*4882a593Smuzhiyun 		return -EINVAL;
2366*4882a593Smuzhiyun 	}
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2369*4882a593Smuzhiyun 				    0, &cmd->body.depthStencilViewId, 1, 0);
2370*4882a593Smuzhiyun 	if (ret)
2371*4882a593Smuzhiyun 		return ret;
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2374*4882a593Smuzhiyun 				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2375*4882a593Smuzhiyun 				     num_rt_view, 0);
2376*4882a593Smuzhiyun }
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun /**
2379*4882a593Smuzhiyun  * vmw_cmd_dx_clear_rendertarget_view - Validate
2380*4882a593Smuzhiyun  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2381*4882a593Smuzhiyun  *
2382*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2383*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2384*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2385*4882a593Smuzhiyun  */
2386*4882a593Smuzhiyun static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2387*4882a593Smuzhiyun 					      struct vmw_sw_context *sw_context,
2388*4882a593Smuzhiyun 					      SVGA3dCmdHeader *header)
2389*4882a593Smuzhiyun {
2390*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2391*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2392*4882a593Smuzhiyun 	struct vmw_resource *ret;
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2395*4882a593Smuzhiyun 				  cmd->body.renderTargetViewId);
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(ret);
2398*4882a593Smuzhiyun }
2399*4882a593Smuzhiyun 
2400*4882a593Smuzhiyun /**
2401*4882a593Smuzhiyun  * vmw_cmd_dx_clear_depthstencil_view - Validate
2402*4882a593Smuzhiyun  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2403*4882a593Smuzhiyun  *
2404*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2405*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2406*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2407*4882a593Smuzhiyun  */
2408*4882a593Smuzhiyun static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2409*4882a593Smuzhiyun 					      struct vmw_sw_context *sw_context,
2410*4882a593Smuzhiyun 					      SVGA3dCmdHeader *header)
2411*4882a593Smuzhiyun {
2412*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2413*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2414*4882a593Smuzhiyun 	struct vmw_resource *ret;
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2417*4882a593Smuzhiyun 				  cmd->body.depthStencilViewId);
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(ret);
2420*4882a593Smuzhiyun }
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2423*4882a593Smuzhiyun 				  struct vmw_sw_context *sw_context,
2424*4882a593Smuzhiyun 				  SVGA3dCmdHeader *header)
2425*4882a593Smuzhiyun {
2426*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2427*4882a593Smuzhiyun 	struct vmw_resource *srf;
2428*4882a593Smuzhiyun 	struct vmw_resource *res;
2429*4882a593Smuzhiyun 	enum vmw_view_type view_type;
2430*4882a593Smuzhiyun 	int ret;
2431*4882a593Smuzhiyun 	/*
2432*4882a593Smuzhiyun 	 * This is based on the fact that all affected define commands have the
2433*4882a593Smuzhiyun 	 * same initial command body layout.
2434*4882a593Smuzhiyun 	 */
2435*4882a593Smuzhiyun 	struct {
2436*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2437*4882a593Smuzhiyun 		uint32 defined_id;
2438*4882a593Smuzhiyun 		uint32 sid;
2439*4882a593Smuzhiyun 	} *cmd;
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	if (!ctx_node)
2442*4882a593Smuzhiyun 		return -EINVAL;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	view_type = vmw_view_cmd_to_type(header->id);
2445*4882a593Smuzhiyun 	if (view_type == vmw_view_max)
2446*4882a593Smuzhiyun 		return -EINVAL;
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2449*4882a593Smuzhiyun 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2450*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid surface id.\n");
2451*4882a593Smuzhiyun 		return -EINVAL;
2452*4882a593Smuzhiyun 	}
2453*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2454*4882a593Smuzhiyun 				VMW_RES_DIRTY_NONE, user_surface_converter,
2455*4882a593Smuzhiyun 				&cmd->sid, &srf);
2456*4882a593Smuzhiyun 	if (unlikely(ret != 0))
2457*4882a593Smuzhiyun 		return ret;
2458*4882a593Smuzhiyun 
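	/*
	 * Notify the context's view cotable so it can grow, if needed, to
	 * hold the new view id before the view itself is added.
	 */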
2459*4882a593Smuzhiyun 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2460*4882a593Smuzhiyun 	ret = vmw_cotable_notify(res, cmd->defined_id);
2461*4882a593Smuzhiyun 	if (unlikely(ret != 0))
2462*4882a593Smuzhiyun 		return ret;
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2465*4882a593Smuzhiyun 			    cmd->defined_id, header,
2466*4882a593Smuzhiyun 			    header->size + sizeof(*header),
2467*4882a593Smuzhiyun 			    &sw_context->staged_cmd_res);
2468*4882a593Smuzhiyun }
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun /**
2471*4882a593Smuzhiyun  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2472*4882a593Smuzhiyun  *
2473*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2474*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2475*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2476*4882a593Smuzhiyun  */
2477*4882a593Smuzhiyun static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2478*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context,
2479*4882a593Smuzhiyun 				     SVGA3dCmdHeader *header)
2480*4882a593Smuzhiyun {
2481*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2482*4882a593Smuzhiyun 	struct vmw_ctx_bindinfo_so_target binding;
2483*4882a593Smuzhiyun 	struct vmw_resource *res;
2484*4882a593Smuzhiyun 	struct {
2485*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2486*4882a593Smuzhiyun 		SVGA3dCmdDXSetSOTargets body;
2487*4882a593Smuzhiyun 		SVGA3dSoTarget targets[];
2488*4882a593Smuzhiyun 	} *cmd;
2489*4882a593Smuzhiyun 	int i, ret, num;
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	if (!ctx_node)
2492*4882a593Smuzhiyun 		return -EINVAL;
2493*4882a593Smuzhiyun 
2494*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2495*4882a593Smuzhiyun 	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2498*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2499*4882a593Smuzhiyun 		return -EINVAL;
2500*4882a593Smuzhiyun 	}
2501*4882a593Smuzhiyun 
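	/* Validate each target surface and stage its SO-target binding. */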
2502*4882a593Smuzhiyun 	for (i = 0; i < num; i++) {
2503*4882a593Smuzhiyun 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2504*4882a593Smuzhiyun 					VMW_RES_DIRTY_SET,
2505*4882a593Smuzhiyun 					user_surface_converter,
2506*4882a593Smuzhiyun 					&cmd->targets[i].sid, &res);
2507*4882a593Smuzhiyun 		if (unlikely(ret != 0))
2508*4882a593Smuzhiyun 			return ret;
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 		binding.bi.ctx = ctx_node->ctx;
2511*4882a593Smuzhiyun 		binding.bi.res = res;
2512*4882a593Smuzhiyun 		binding.bi.bt = vmw_ctx_binding_so_target;
2513*4882a593Smuzhiyun 		binding.offset = cmd->targets[i].offset;
2514*4882a593Smuzhiyun 		binding.size = cmd->targets[i].sizeInBytes;
2515*4882a593Smuzhiyun 		binding.slot = i;
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2518*4882a593Smuzhiyun 	}
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	return 0;
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2524*4882a593Smuzhiyun 				struct vmw_sw_context *sw_context,
2525*4882a593Smuzhiyun 				SVGA3dCmdHeader *header)
2526*4882a593Smuzhiyun {
2527*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2528*4882a593Smuzhiyun 	struct vmw_resource *res;
2529*4882a593Smuzhiyun 	/*
2530*4882a593Smuzhiyun 	 * This is based on the fact that all affected define commands have
2531*4882a593Smuzhiyun 	 * the same initial command body layout.
2532*4882a593Smuzhiyun 	 */
2533*4882a593Smuzhiyun 	struct {
2534*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2535*4882a593Smuzhiyun 		uint32 defined_id;
2536*4882a593Smuzhiyun 	} *cmd;
2537*4882a593Smuzhiyun 	enum vmw_so_type so_type;
2538*4882a593Smuzhiyun 	int ret;
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	if (!ctx_node)
2541*4882a593Smuzhiyun 		return -EINVAL;
2542*4882a593Smuzhiyun 
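	/*
	 * The object type is derived from the command id; notify the
	 * matching cotable so it can grow to hold the defined id.
	 */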
2543*4882a593Smuzhiyun 	so_type = vmw_so_cmd_to_type(header->id);
2544*4882a593Smuzhiyun 	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2545*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2546*4882a593Smuzhiyun 	ret = vmw_cotable_notify(res, cmd->defined_id);
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun 	return ret;
2549*4882a593Smuzhiyun }
2550*4882a593Smuzhiyun 
2551*4882a593Smuzhiyun /**
2552*4882a593Smuzhiyun  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2553*4882a593Smuzhiyun  * command
2554*4882a593Smuzhiyun  *
2555*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2556*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2557*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2558*4882a593Smuzhiyun  */
2559*4882a593Smuzhiyun static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2560*4882a593Smuzhiyun 					struct vmw_sw_context *sw_context,
2561*4882a593Smuzhiyun 					SVGA3dCmdHeader *header)
2562*4882a593Smuzhiyun {
2563*4882a593Smuzhiyun 	struct {
2564*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2565*4882a593Smuzhiyun 		union {
2566*4882a593Smuzhiyun 			SVGA3dCmdDXReadbackSubResource r_body;
2567*4882a593Smuzhiyun 			SVGA3dCmdDXInvalidateSubResource i_body;
2568*4882a593Smuzhiyun 			SVGA3dCmdDXUpdateSubResource u_body;
2569*4882a593Smuzhiyun 			SVGA3dSurfaceId sid;
2570*4882a593Smuzhiyun 		};
2571*4882a593Smuzhiyun 	} *cmd;
2572*4882a593Smuzhiyun 
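	/*
	 * The union above relies on the sid member sitting at the same
	 * offset in all three command bodies; enforce that at compile time.
	 */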
2573*4882a593Smuzhiyun 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2574*4882a593Smuzhiyun 		     offsetof(typeof(*cmd), sid));
2575*4882a593Smuzhiyun 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2576*4882a593Smuzhiyun 		     offsetof(typeof(*cmd), sid));
2577*4882a593Smuzhiyun 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2578*4882a593Smuzhiyun 		     offsetof(typeof(*cmd), sid));
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 	cmd = container_of(header, typeof(*cmd), header);
2581*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2582*4882a593Smuzhiyun 				 VMW_RES_DIRTY_NONE, user_surface_converter,
2583*4882a593Smuzhiyun 				 &cmd->sid, NULL);
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2587*4882a593Smuzhiyun 				struct vmw_sw_context *sw_context,
2588*4882a593Smuzhiyun 				SVGA3dCmdHeader *header)
2589*4882a593Smuzhiyun {
2590*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun 	if (!ctx_node)
2593*4882a593Smuzhiyun 		return -EINVAL;
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	return 0;
2596*4882a593Smuzhiyun }
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun /**
2599*4882a593Smuzhiyun  * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2600*4882a593Smuzhiyun  * resource for removal.
2601*4882a593Smuzhiyun  *
2602*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2603*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2604*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2605*4882a593Smuzhiyun  *
2606*4882a593Smuzhiyun  * Check that the view exists, and if it was not created using this command
2607*4882a593Smuzhiyun  * batch, conditionally make this command a NOP.
2608*4882a593Smuzhiyun  */
2609*4882a593Smuzhiyun static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2610*4882a593Smuzhiyun 				  struct vmw_sw_context *sw_context,
2611*4882a593Smuzhiyun 				  SVGA3dCmdHeader *header)
2612*4882a593Smuzhiyun {
2613*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2614*4882a593Smuzhiyun 	struct {
2615*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2616*4882a593Smuzhiyun 		union vmw_view_destroy body;
2617*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
2618*4882a593Smuzhiyun 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2619*4882a593Smuzhiyun 	struct vmw_resource *view;
2620*4882a593Smuzhiyun 	int ret;
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun 	if (!ctx_node)
2623*4882a593Smuzhiyun 		return -EINVAL;
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2626*4882a593Smuzhiyun 			      &sw_context->staged_cmd_res, &view);
2627*4882a593Smuzhiyun 	if (ret || !view)
2628*4882a593Smuzhiyun 		return ret;
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	/*
2631*4882a593Smuzhiyun 	 * If the view wasn't created during this command batch, it might
2632*4882a593Smuzhiyun 	 * have been removed due to a context swapout, so add a
2633*4882a593Smuzhiyun 	 * relocation to conditionally make this command a NOP to avoid
2634*4882a593Smuzhiyun 	 * device errors.
2635*4882a593Smuzhiyun 	 */
2636*4882a593Smuzhiyun 	return vmw_resource_relocation_add(sw_context, view,
2637*4882a593Smuzhiyun 					   vmw_ptr_diff(sw_context->buf_start,
2638*4882a593Smuzhiyun 							&cmd->header.id),
2639*4882a593Smuzhiyun 					   vmw_res_rel_cond_nop);
2640*4882a593Smuzhiyun }
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun /**
2643*4882a593Smuzhiyun  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2644*4882a593Smuzhiyun  *
2645*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2646*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2647*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2648*4882a593Smuzhiyun  */
2649*4882a593Smuzhiyun static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2650*4882a593Smuzhiyun 				    struct vmw_sw_context *sw_context,
2651*4882a593Smuzhiyun 				    SVGA3dCmdHeader *header)
2652*4882a593Smuzhiyun {
2653*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2654*4882a593Smuzhiyun 	struct vmw_resource *res;
2655*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2656*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2657*4882a593Smuzhiyun 	int ret;
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun 	if (!ctx_node)
2660*4882a593Smuzhiyun 		return -EINVAL;
2661*4882a593Smuzhiyun 
2662*4882a593Smuzhiyun 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2663*4882a593Smuzhiyun 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2664*4882a593Smuzhiyun 	if (ret)
2665*4882a593Smuzhiyun 		return ret;
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun 	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2668*4882a593Smuzhiyun 				 cmd->body.shaderId, cmd->body.type,
2669*4882a593Smuzhiyun 				 &sw_context->staged_cmd_res);
2670*4882a593Smuzhiyun }
2671*4882a593Smuzhiyun 
2672*4882a593Smuzhiyun /**
2673*4882a593Smuzhiyun  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2674*4882a593Smuzhiyun  *
2675*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2676*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2677*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2678*4882a593Smuzhiyun  */
2679*4882a593Smuzhiyun static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2680*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context,
2681*4882a593Smuzhiyun 				     SVGA3dCmdHeader *header)
2682*4882a593Smuzhiyun {
2683*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2684*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2685*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2686*4882a593Smuzhiyun 	int ret;
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun 	if (!ctx_node)
2689*4882a593Smuzhiyun 		return -EINVAL;
2690*4882a593Smuzhiyun 
2691*4882a593Smuzhiyun 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2692*4882a593Smuzhiyun 				&sw_context->staged_cmd_res);
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	return ret;
2695*4882a593Smuzhiyun }
2696*4882a593Smuzhiyun 
2697*4882a593Smuzhiyun /**
2698*4882a593Smuzhiyun  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2699*4882a593Smuzhiyun  *
2700*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2701*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2702*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2703*4882a593Smuzhiyun  */
2704*4882a593Smuzhiyun static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2705*4882a593Smuzhiyun 				  struct vmw_sw_context *sw_context,
2706*4882a593Smuzhiyun 				  SVGA3dCmdHeader *header)
2707*4882a593Smuzhiyun {
2708*4882a593Smuzhiyun 	struct vmw_resource *ctx;
2709*4882a593Smuzhiyun 	struct vmw_resource *res;
2710*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2711*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2712*4882a593Smuzhiyun 	int ret;
2713*4882a593Smuzhiyun 
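	/*
	 * A valid context id selects a per-context shader; otherwise the
	 * shader belongs to the command buffer's DX context.
	 */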
2714*4882a593Smuzhiyun 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2715*4882a593Smuzhiyun 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2716*4882a593Smuzhiyun 					VMW_RES_DIRTY_SET,
2717*4882a593Smuzhiyun 					user_context_converter, &cmd->body.cid,
2718*4882a593Smuzhiyun 					&ctx);
2719*4882a593Smuzhiyun 		if (ret)
2720*4882a593Smuzhiyun 			return ret;
2721*4882a593Smuzhiyun 	} else {
2722*4882a593Smuzhiyun 		struct vmw_ctx_validation_info *ctx_node =
2723*4882a593Smuzhiyun 			VMW_GET_CTX_NODE(sw_context);
2724*4882a593Smuzhiyun 
2725*4882a593Smuzhiyun 		if (!ctx_node)
2726*4882a593Smuzhiyun 			return -EINVAL;
2727*4882a593Smuzhiyun 
2728*4882a593Smuzhiyun 		ctx = ctx_node->ctx;
2729*4882a593Smuzhiyun 	}
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun 	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2732*4882a593Smuzhiyun 	if (IS_ERR(res)) {
2733*4882a593Smuzhiyun 		VMW_DEBUG_USER("Could not find shader to bind.\n");
2734*4882a593Smuzhiyun 		return PTR_ERR(res);
2735*4882a593Smuzhiyun 	}
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2738*4882a593Smuzhiyun 					    VMW_RES_DIRTY_NONE);
2739*4882a593Smuzhiyun 	if (ret) {
2740*4882a593Smuzhiyun 		VMW_DEBUG_USER("Error creating resource validation node.\n");
2741*4882a593Smuzhiyun 		return ret;
2742*4882a593Smuzhiyun 	}
2743*4882a593Smuzhiyun 
2744*4882a593Smuzhiyun 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2745*4882a593Smuzhiyun 					 &cmd->body.mobid,
2746*4882a593Smuzhiyun 					 cmd->body.offsetInBytes);
2747*4882a593Smuzhiyun }
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun /**
2750*4882a593Smuzhiyun  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2751*4882a593Smuzhiyun  *
2752*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2753*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2754*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2755*4882a593Smuzhiyun  */
2756*4882a593Smuzhiyun static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2757*4882a593Smuzhiyun 			      struct vmw_sw_context *sw_context,
2758*4882a593Smuzhiyun 			      SVGA3dCmdHeader *header)
2759*4882a593Smuzhiyun {
2760*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2761*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2762*4882a593Smuzhiyun 	struct vmw_resource *view;
2763*4882a593Smuzhiyun 	struct vmw_res_cache_entry *rcache;
2764*4882a593Smuzhiyun 
2765*4882a593Smuzhiyun 	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2766*4882a593Smuzhiyun 				   cmd->body.shaderResourceViewId);
2767*4882a593Smuzhiyun 	if (IS_ERR(view))
2768*4882a593Smuzhiyun 		return PTR_ERR(view);
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	/*
2771*4882a593Smuzhiyun 	 * Normally the shader-resource view is not gpu-dirtying, but for
2772*4882a593Smuzhiyun 	 * this particular command it is...
2773*4882a593Smuzhiyun 	 * So mark the last looked-up surface, which is the surface
2774*4882a593Smuzhiyun 	 * the view points to, gpu-dirty.
2775*4882a593Smuzhiyun 	 */
2776*4882a593Smuzhiyun 	rcache = &sw_context->res_cache[vmw_res_surface];
2777*4882a593Smuzhiyun 	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2778*4882a593Smuzhiyun 				     VMW_RES_DIRTY_SET);
2779*4882a593Smuzhiyun 	return 0;
2780*4882a593Smuzhiyun }
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun /**
2783*4882a593Smuzhiyun  * vmw_cmd_dx_transfer_from_buffer - Validate
2784*4882a593Smuzhiyun  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2785*4882a593Smuzhiyun  *
2786*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2787*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2788*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2789*4882a593Smuzhiyun  */
2790*4882a593Smuzhiyun static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2791*4882a593Smuzhiyun 					   struct vmw_sw_context *sw_context,
2792*4882a593Smuzhiyun 					   SVGA3dCmdHeader *header)
2793*4882a593Smuzhiyun {
2794*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2795*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2796*4882a593Smuzhiyun 	int ret;
2797*4882a593Smuzhiyun 
2798*4882a593Smuzhiyun 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2799*4882a593Smuzhiyun 				VMW_RES_DIRTY_NONE, user_surface_converter,
2800*4882a593Smuzhiyun 				&cmd->body.srcSid, NULL);
2801*4882a593Smuzhiyun 	if (ret != 0)
2802*4882a593Smuzhiyun 		return ret;
2803*4882a593Smuzhiyun 
2804*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2805*4882a593Smuzhiyun 				 VMW_RES_DIRTY_SET, user_surface_converter,
2806*4882a593Smuzhiyun 				 &cmd->body.destSid, NULL);
2807*4882a593Smuzhiyun }
2808*4882a593Smuzhiyun 
2809*4882a593Smuzhiyun /**
2810*4882a593Smuzhiyun  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2811*4882a593Smuzhiyun  *
2812*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private struct.
2813*4882a593Smuzhiyun  * @sw_context: The software context being used for this batch.
2814*4882a593Smuzhiyun  * @header: Pointer to the command header in the command stream.
2815*4882a593Smuzhiyun  */
2816*4882a593Smuzhiyun static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2817*4882a593Smuzhiyun 					   struct vmw_sw_context *sw_context,
2818*4882a593Smuzhiyun 					   SVGA3dCmdHeader *header)
2819*4882a593Smuzhiyun {
2820*4882a593Smuzhiyun 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2821*4882a593Smuzhiyun 		container_of(header, typeof(*cmd), header);
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun 	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2824*4882a593Smuzhiyun 		return -EINVAL;
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2827*4882a593Smuzhiyun 				 VMW_RES_DIRTY_SET, user_surface_converter,
2828*4882a593Smuzhiyun 				 &cmd->body.surface.sid, NULL);
2829*4882a593Smuzhiyun }
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2832*4882a593Smuzhiyun 		       struct vmw_sw_context *sw_context,
2833*4882a593Smuzhiyun 		       SVGA3dCmdHeader *header)
2834*4882a593Smuzhiyun {
2835*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2836*4882a593Smuzhiyun 		return -EINVAL;
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 	return 0;
2839*4882a593Smuzhiyun }
2840*4882a593Smuzhiyun 
2841*4882a593Smuzhiyun static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2842*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
2843*4882a593Smuzhiyun 				   SVGA3dCmdHeader *header)
2844*4882a593Smuzhiyun {
2845*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2846*4882a593Smuzhiyun 		return -EINVAL;
2847*4882a593Smuzhiyun 
2848*4882a593Smuzhiyun 	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2849*4882a593Smuzhiyun }
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2852*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
2853*4882a593Smuzhiyun 				   SVGA3dCmdHeader *header)
2854*4882a593Smuzhiyun {
2855*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2856*4882a593Smuzhiyun 		return -EINVAL;
2857*4882a593Smuzhiyun 
2858*4882a593Smuzhiyun 	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2859*4882a593Smuzhiyun }
2860*4882a593Smuzhiyun 
2861*4882a593Smuzhiyun static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2862*4882a593Smuzhiyun 				  struct vmw_sw_context *sw_context,
2863*4882a593Smuzhiyun 				  SVGA3dCmdHeader *header)
2864*4882a593Smuzhiyun {
2865*4882a593Smuzhiyun 	struct {
2866*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2867*4882a593Smuzhiyun 		SVGA3dCmdDXClearUAViewUint body;
2868*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
2869*4882a593Smuzhiyun 	struct vmw_resource *ret;
2870*4882a593Smuzhiyun 
2871*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2872*4882a593Smuzhiyun 		return -EINVAL;
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2875*4882a593Smuzhiyun 				  cmd->body.uaViewId);
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(ret);
2878*4882a593Smuzhiyun }
2879*4882a593Smuzhiyun 
2880*4882a593Smuzhiyun static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2881*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
2882*4882a593Smuzhiyun 				   SVGA3dCmdHeader *header)
2883*4882a593Smuzhiyun {
2884*4882a593Smuzhiyun 	struct {
2885*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2886*4882a593Smuzhiyun 		SVGA3dCmdDXClearUAViewFloat body;
2887*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
2888*4882a593Smuzhiyun 	struct vmw_resource *ret;
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2891*4882a593Smuzhiyun 		return -EINVAL;
2892*4882a593Smuzhiyun 
2893*4882a593Smuzhiyun 	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2894*4882a593Smuzhiyun 				  cmd->body.uaViewId);
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(ret);
2897*4882a593Smuzhiyun }
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2900*4882a593Smuzhiyun 			   struct vmw_sw_context *sw_context,
2901*4882a593Smuzhiyun 			   SVGA3dCmdHeader *header)
2902*4882a593Smuzhiyun {
2903*4882a593Smuzhiyun 	struct {
2904*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2905*4882a593Smuzhiyun 		SVGA3dCmdDXSetUAViews body;
2906*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
2907*4882a593Smuzhiyun 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2908*4882a593Smuzhiyun 		sizeof(SVGA3dUAViewId);
2909*4882a593Smuzhiyun 	int ret;
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2912*4882a593Smuzhiyun 		return -EINVAL;
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2915*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2916*4882a593Smuzhiyun 		return -EINVAL;
2917*4882a593Smuzhiyun 	}
2918*4882a593Smuzhiyun 
2919*4882a593Smuzhiyun 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2920*4882a593Smuzhiyun 				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2921*4882a593Smuzhiyun 				    num_uav, 0);
2922*4882a593Smuzhiyun 	if (ret)
2923*4882a593Smuzhiyun 		return ret;
2924*4882a593Smuzhiyun 
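	/* Record the splice index for the staged UAV bindings. */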
2925*4882a593Smuzhiyun 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2926*4882a593Smuzhiyun 				  cmd->body.uavSpliceIndex);
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun 	return ret;
2929*4882a593Smuzhiyun }
2930*4882a593Smuzhiyun 
2931*4882a593Smuzhiyun static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2932*4882a593Smuzhiyun 			      struct vmw_sw_context *sw_context,
2933*4882a593Smuzhiyun 			      SVGA3dCmdHeader *header)
2934*4882a593Smuzhiyun {
2935*4882a593Smuzhiyun 	struct {
2936*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2937*4882a593Smuzhiyun 		SVGA3dCmdDXSetCSUAViews body;
2938*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
2939*4882a593Smuzhiyun 	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2940*4882a593Smuzhiyun 		sizeof(SVGA3dUAViewId);
2941*4882a593Smuzhiyun 	int ret;
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2944*4882a593Smuzhiyun 		return -EINVAL;
2945*4882a593Smuzhiyun 
2946*4882a593Smuzhiyun 	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2947*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2948*4882a593Smuzhiyun 		return -EINVAL;
2949*4882a593Smuzhiyun 	}
2950*4882a593Smuzhiyun 
2951*4882a593Smuzhiyun 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2952*4882a593Smuzhiyun 				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2953*4882a593Smuzhiyun 				    num_uav, 0);
2954*4882a593Smuzhiyun 	if (ret)
2955*4882a593Smuzhiyun 		return ret;
2956*4882a593Smuzhiyun 
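	/* Record the start index for the staged compute-shader UAV bindings. */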
2957*4882a593Smuzhiyun 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2958*4882a593Smuzhiyun 				  cmd->body.startIndex);
2959*4882a593Smuzhiyun 
2960*4882a593Smuzhiyun 	return ret;
2961*4882a593Smuzhiyun }
2962*4882a593Smuzhiyun 
2963*4882a593Smuzhiyun static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2964*4882a593Smuzhiyun 					  struct vmw_sw_context *sw_context,
2965*4882a593Smuzhiyun 					  SVGA3dCmdHeader *header)
2966*4882a593Smuzhiyun {
2967*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2968*4882a593Smuzhiyun 	struct vmw_resource *res;
2969*4882a593Smuzhiyun 	struct {
2970*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
2971*4882a593Smuzhiyun 		SVGA3dCmdDXDefineStreamOutputWithMob body;
2972*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
2973*4882a593Smuzhiyun 	int ret;
2974*4882a593Smuzhiyun 
2975*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
2976*4882a593Smuzhiyun 		return -EINVAL;
2977*4882a593Smuzhiyun 
2978*4882a593Smuzhiyun 	if (!ctx_node) {
2979*4882a593Smuzhiyun 		DRM_ERROR("DX Context not set.\n");
2980*4882a593Smuzhiyun 		return -EINVAL;
2981*4882a593Smuzhiyun 	}
2982*4882a593Smuzhiyun 
2983*4882a593Smuzhiyun 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
2984*4882a593Smuzhiyun 	ret = vmw_cotable_notify(res, cmd->body.soid);
2985*4882a593Smuzhiyun 	if (ret)
2986*4882a593Smuzhiyun 		return ret;
2987*4882a593Smuzhiyun 
2988*4882a593Smuzhiyun 	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
2989*4882a593Smuzhiyun 				       cmd->body.soid,
2990*4882a593Smuzhiyun 				       &sw_context->staged_cmd_res);
2991*4882a593Smuzhiyun }
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
2994*4882a593Smuzhiyun 					   struct vmw_sw_context *sw_context,
2995*4882a593Smuzhiyun 					   SVGA3dCmdHeader *header)
2996*4882a593Smuzhiyun {
2997*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2998*4882a593Smuzhiyun 	struct vmw_resource *res;
2999*4882a593Smuzhiyun 	struct {
3000*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
3001*4882a593Smuzhiyun 		SVGA3dCmdDXDestroyStreamOutput body;
3002*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun 	if (!ctx_node) {
3005*4882a593Smuzhiyun 		DRM_ERROR("DX Context not set.\n");
3006*4882a593Smuzhiyun 		return -EINVAL;
3007*4882a593Smuzhiyun 	}
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun 	/*
3010*4882a593Smuzhiyun 	 * When the device does not support SM5, the streamoutput-with-mob
3011*4882a593Smuzhiyun 	 * command is not available to user-space. Simply return in this case.
3012*4882a593Smuzhiyun 	 */
3013*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
3014*4882a593Smuzhiyun 		return 0;
3015*4882a593Smuzhiyun 
3016*4882a593Smuzhiyun 	/*
3017*4882a593Smuzhiyun 	 * On an SM5-capable device, a failed lookup means user-space probably
3018*4882a593Smuzhiyun 	 * used the old streamoutput define command. Return without an error.
3019*4882a593Smuzhiyun 	 */
3020*4882a593Smuzhiyun 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3021*4882a593Smuzhiyun 					 cmd->body.soid);
3022*4882a593Smuzhiyun 	if (IS_ERR(res))
3023*4882a593Smuzhiyun 		return 0;
3024*4882a593Smuzhiyun 
3025*4882a593Smuzhiyun 	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3026*4882a593Smuzhiyun 					  &sw_context->staged_cmd_res);
3027*4882a593Smuzhiyun }
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3030*4882a593Smuzhiyun 					struct vmw_sw_context *sw_context,
3031*4882a593Smuzhiyun 					SVGA3dCmdHeader *header)
3032*4882a593Smuzhiyun {
3033*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3034*4882a593Smuzhiyun 	struct vmw_resource *res;
3035*4882a593Smuzhiyun 	struct {
3036*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
3037*4882a593Smuzhiyun 		SVGA3dCmdDXBindStreamOutput body;
3038*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
3039*4882a593Smuzhiyun 	int ret;
3040*4882a593Smuzhiyun 
3041*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
3042*4882a593Smuzhiyun 		return -EINVAL;
3043*4882a593Smuzhiyun 
3044*4882a593Smuzhiyun 	if (!ctx_node) {
3045*4882a593Smuzhiyun 		DRM_ERROR("DX Context not set.\n");
3046*4882a593Smuzhiyun 		return -EINVAL;
3047*4882a593Smuzhiyun 	}
3048*4882a593Smuzhiyun 
3049*4882a593Smuzhiyun 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3050*4882a593Smuzhiyun 					 cmd->body.soid);
3051*4882a593Smuzhiyun 	if (IS_ERR(res)) {
3052*4882a593Smuzhiyun 		DRM_ERROR("Could not find streamoutput to bind.\n");
3053*4882a593Smuzhiyun 		return PTR_ERR(res);
3054*4882a593Smuzhiyun 	}
3055*4882a593Smuzhiyun 
3056*4882a593Smuzhiyun 	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3059*4882a593Smuzhiyun 					    VMW_RES_DIRTY_NONE);
3060*4882a593Smuzhiyun 	if (ret) {
3061*4882a593Smuzhiyun 		DRM_ERROR("Error creating resource validation node.\n");
3062*4882a593Smuzhiyun 		return ret;
3063*4882a593Smuzhiyun 	}
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3066*4882a593Smuzhiyun 					 &cmd->body.mobid,
3067*4882a593Smuzhiyun 					 cmd->body.offsetInBytes);
3068*4882a593Smuzhiyun }
3069*4882a593Smuzhiyun 
3070*4882a593Smuzhiyun static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3071*4882a593Smuzhiyun 				       struct vmw_sw_context *sw_context,
3072*4882a593Smuzhiyun 				       SVGA3dCmdHeader *header)
3073*4882a593Smuzhiyun {
3074*4882a593Smuzhiyun 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3075*4882a593Smuzhiyun 	struct vmw_resource *res;
3076*4882a593Smuzhiyun 	struct vmw_ctx_bindinfo_so binding;
3077*4882a593Smuzhiyun 	struct {
3078*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
3079*4882a593Smuzhiyun 		SVGA3dCmdDXSetStreamOutput body;
3080*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
3081*4882a593Smuzhiyun 	int ret;
3082*4882a593Smuzhiyun 
3083*4882a593Smuzhiyun 	if (!ctx_node) {
3084*4882a593Smuzhiyun 		DRM_ERROR("DX Context not set.\n");
3085*4882a593Smuzhiyun 		return -EINVAL;
3086*4882a593Smuzhiyun 	}
3087*4882a593Smuzhiyun 
3088*4882a593Smuzhiyun 	if (cmd->body.soid == SVGA3D_INVALID_ID)
3089*4882a593Smuzhiyun 		return 0;
3090*4882a593Smuzhiyun 
3091*4882a593Smuzhiyun 	/*
3092*4882a593Smuzhiyun 	 * When the device does not support SM5, the mob-backed streamoutput
3093*4882a593Smuzhiyun 	 * commands are not available to user-space. Simply return in this case.
3094*4882a593Smuzhiyun 	 */
3095*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
3096*4882a593Smuzhiyun 		return 0;
3097*4882a593Smuzhiyun 
3098*4882a593Smuzhiyun 	/*
3099*4882a593Smuzhiyun 	 * On an SM5-capable device, a failed lookup means user-space probably
3100*4882a593Smuzhiyun 	 * used the old streamoutput define command. Return without an error.
3101*4882a593Smuzhiyun 	 */
3102*4882a593Smuzhiyun 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3103*4882a593Smuzhiyun 					 cmd->body.soid);
3104*4882a593Smuzhiyun 	if (IS_ERR(res))
3105*4882a593Smuzhiyun 		return 0;
3107*4882a593Smuzhiyun 
3108*4882a593Smuzhiyun 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3109*4882a593Smuzhiyun 					    VMW_RES_DIRTY_NONE);
3110*4882a593Smuzhiyun 	if (ret) {
3111*4882a593Smuzhiyun 		DRM_ERROR("Error creating resource validation node.\n");
3112*4882a593Smuzhiyun 		return ret;
3113*4882a593Smuzhiyun 	}
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	binding.bi.ctx = ctx_node->ctx;
3116*4882a593Smuzhiyun 	binding.bi.res = res;
3117*4882a593Smuzhiyun 	binding.bi.bt = vmw_ctx_binding_so;
3118*4882a593Smuzhiyun 	binding.slot = 0; /* Only one SO set to context at a time. */
3119*4882a593Smuzhiyun 
3120*4882a593Smuzhiyun 	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3121*4882a593Smuzhiyun 			binding.slot);
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	return ret;
3124*4882a593Smuzhiyun }
3125*4882a593Smuzhiyun 
3126*4882a593Smuzhiyun static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3127*4882a593Smuzhiyun 					      struct vmw_sw_context *sw_context,
3128*4882a593Smuzhiyun 					      SVGA3dCmdHeader *header)
3129*4882a593Smuzhiyun {
3130*4882a593Smuzhiyun 	struct vmw_draw_indexed_instanced_indirect_cmd {
3131*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
3132*4882a593Smuzhiyun 		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3133*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
3136*4882a593Smuzhiyun 		return -EINVAL;
3137*4882a593Smuzhiyun 
3138*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3139*4882a593Smuzhiyun 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3140*4882a593Smuzhiyun 				 &cmd->body.argsBufferSid, NULL);
3141*4882a593Smuzhiyun }
3142*4882a593Smuzhiyun 
3143*4882a593Smuzhiyun static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3144*4882a593Smuzhiyun 				      struct vmw_sw_context *sw_context,
3145*4882a593Smuzhiyun 				      SVGA3dCmdHeader *header)
3146*4882a593Smuzhiyun {
3147*4882a593Smuzhiyun 	struct vmw_draw_instanced_indirect_cmd {
3148*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
3149*4882a593Smuzhiyun 		SVGA3dCmdDXDrawInstancedIndirect body;
3150*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
3151*4882a593Smuzhiyun 
3152*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
3153*4882a593Smuzhiyun 		return -EINVAL;
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3156*4882a593Smuzhiyun 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3157*4882a593Smuzhiyun 				 &cmd->body.argsBufferSid, NULL);
3158*4882a593Smuzhiyun }
3159*4882a593Smuzhiyun 
3160*4882a593Smuzhiyun static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3161*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context,
3162*4882a593Smuzhiyun 				     SVGA3dCmdHeader *header)
3163*4882a593Smuzhiyun {
3164*4882a593Smuzhiyun 	struct vmw_dispatch_indirect_cmd {
3165*4882a593Smuzhiyun 		SVGA3dCmdHeader header;
3166*4882a593Smuzhiyun 		SVGA3dCmdDXDispatchIndirect body;
3167*4882a593Smuzhiyun 	} *cmd = container_of(header, typeof(*cmd), header);
3168*4882a593Smuzhiyun 
3169*4882a593Smuzhiyun 	if (!has_sm5_context(dev_priv))
3170*4882a593Smuzhiyun 		return -EINVAL;
3171*4882a593Smuzhiyun 
3172*4882a593Smuzhiyun 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3173*4882a593Smuzhiyun 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3174*4882a593Smuzhiyun 				 &cmd->body.argsBufferSid, NULL);
3175*4882a593Smuzhiyun }
3176*4882a593Smuzhiyun 
3177*4882a593Smuzhiyun static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3178*4882a593Smuzhiyun 				struct vmw_sw_context *sw_context,
3179*4882a593Smuzhiyun 				void *buf, uint32_t *size)
3180*4882a593Smuzhiyun {
3181*4882a593Smuzhiyun 	uint32_t size_remaining = *size;
3182*4882a593Smuzhiyun 	uint32_t cmd_id;
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 	cmd_id = ((uint32_t *)buf)[0];
3185*4882a593Smuzhiyun 	switch (cmd_id) {
3186*4882a593Smuzhiyun 	case SVGA_CMD_UPDATE:
3187*4882a593Smuzhiyun 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3188*4882a593Smuzhiyun 		break;
3189*4882a593Smuzhiyun 	case SVGA_CMD_DEFINE_GMRFB:
3190*4882a593Smuzhiyun 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3191*4882a593Smuzhiyun 		break;
3192*4882a593Smuzhiyun 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3193*4882a593Smuzhiyun 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3194*4882a593Smuzhiyun 		break;
3195*4882a593Smuzhiyun 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3196*4882a593Smuzhiyun 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3197*4882a593Smuzhiyun 		break;
3198*4882a593Smuzhiyun 	default:
3199*4882a593Smuzhiyun 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3200*4882a593Smuzhiyun 		return -EINVAL;
3201*4882a593Smuzhiyun 	}
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun 	if (*size > size_remaining) {
3204*4882a593Smuzhiyun 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3205*4882a593Smuzhiyun 			       cmd_id);
3206*4882a593Smuzhiyun 		return -EINVAL;
3207*4882a593Smuzhiyun 	}
3208*4882a593Smuzhiyun 
3209*4882a593Smuzhiyun 	if (unlikely(!sw_context->kernel)) {
3210*4882a593Smuzhiyun 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3211*4882a593Smuzhiyun 		return -EPERM;
3212*4882a593Smuzhiyun 	}
3213*4882a593Smuzhiyun 
3214*4882a593Smuzhiyun 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3215*4882a593Smuzhiyun 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3216*4882a593Smuzhiyun 
3217*4882a593Smuzhiyun 	return 0;
3218*4882a593Smuzhiyun }
3219*4882a593Smuzhiyun 
3220*4882a593Smuzhiyun static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3221*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3222*4882a593Smuzhiyun 		    false, false, false),
3223*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3224*4882a593Smuzhiyun 		    false, false, false),
3225*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3226*4882a593Smuzhiyun 		    true, false, false),
3227*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3228*4882a593Smuzhiyun 		    true, false, false),
3229*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3230*4882a593Smuzhiyun 		    true, false, false),
3231*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3232*4882a593Smuzhiyun 		    false, false, false),
3233*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3234*4882a593Smuzhiyun 		    false, false, false),
3235*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3236*4882a593Smuzhiyun 		    true, false, false),
3237*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3238*4882a593Smuzhiyun 		    true, false, false),
3239*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3240*4882a593Smuzhiyun 		    true, false, false),
3241*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3242*4882a593Smuzhiyun 		    &vmw_cmd_set_render_target_check, true, false, false),
3243*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3244*4882a593Smuzhiyun 		    true, false, false),
3245*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3246*4882a593Smuzhiyun 		    true, false, false),
3247*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3248*4882a593Smuzhiyun 		    true, false, false),
3249*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3250*4882a593Smuzhiyun 		    true, false, false),
3251*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3252*4882a593Smuzhiyun 		    true, false, false),
3253*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3254*4882a593Smuzhiyun 		    true, false, false),
3255*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3256*4882a593Smuzhiyun 		    true, false, false),
3257*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3258*4882a593Smuzhiyun 		    false, false, false),
3259*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3260*4882a593Smuzhiyun 		    true, false, false),
3261*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3262*4882a593Smuzhiyun 		    true, false, false),
3263*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3264*4882a593Smuzhiyun 		    true, false, false),
3265*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3266*4882a593Smuzhiyun 		    true, false, false),
3267*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3268*4882a593Smuzhiyun 		    true, false, false),
3269*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3270*4882a593Smuzhiyun 		    true, false, false),
3271*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3272*4882a593Smuzhiyun 		    true, false, false),
3273*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3274*4882a593Smuzhiyun 		    true, false, false),
3275*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3276*4882a593Smuzhiyun 		    true, false, false),
3277*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3278*4882a593Smuzhiyun 		    true, false, false),
3279*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3280*4882a593Smuzhiyun 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3281*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3282*4882a593Smuzhiyun 		    false, false, false),
3283*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3284*4882a593Smuzhiyun 		    false, false, false),
3285*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3286*4882a593Smuzhiyun 		    false, false, false),
3287*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3288*4882a593Smuzhiyun 		    false, false, false),
3289*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3290*4882a593Smuzhiyun 		    false, false, false),
3291*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3292*4882a593Smuzhiyun 		    false, false, false),
3293*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3294*4882a593Smuzhiyun 		    false, false, false),
3295*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3296*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3297*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3298*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3299*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3300*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3301*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3302*4882a593Smuzhiyun 		    false, false, true),
3303*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3304*4882a593Smuzhiyun 		    false, false, true),
3305*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3306*4882a593Smuzhiyun 		    false, false, true),
3307*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3308*4882a593Smuzhiyun 		    false, false, true),
3309*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3310*4882a593Smuzhiyun 		    false, false, true),
3311*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3312*4882a593Smuzhiyun 		    false, false, true),
3313*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3314*4882a593Smuzhiyun 		    false, false, true),
3315*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3316*4882a593Smuzhiyun 		    false, false, true),
3317*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3318*4882a593Smuzhiyun 		    true, false, true),
3319*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3320*4882a593Smuzhiyun 		    false, false, true),
3321*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3322*4882a593Smuzhiyun 		    true, false, true),
3323*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3324*4882a593Smuzhiyun 		    &vmw_cmd_update_gb_surface, true, false, true),
3325*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3326*4882a593Smuzhiyun 		    &vmw_cmd_readback_gb_image, true, false, true),
3327*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3328*4882a593Smuzhiyun 		    &vmw_cmd_readback_gb_surface, true, false, true),
3329*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3330*4882a593Smuzhiyun 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3331*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3332*4882a593Smuzhiyun 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3333*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3334*4882a593Smuzhiyun 		    false, false, true),
3335*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3336*4882a593Smuzhiyun 		    false, false, true),
3337*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3338*4882a593Smuzhiyun 		    false, false, true),
3339*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3340*4882a593Smuzhiyun 		    false, false, true),
3341*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3342*4882a593Smuzhiyun 		    false, false, true),
3343*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3344*4882a593Smuzhiyun 		    false, false, true),
3345*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3346*4882a593Smuzhiyun 		    true, false, true),
3347*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3348*4882a593Smuzhiyun 		    false, false, true),
3349*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3350*4882a593Smuzhiyun 		    false, false, false),
3351*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3352*4882a593Smuzhiyun 		    true, false, true),
3353*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3354*4882a593Smuzhiyun 		    true, false, true),
3355*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3356*4882a593Smuzhiyun 		    true, false, true),
3357*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3358*4882a593Smuzhiyun 		    true, false, true),
3359*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3360*4882a593Smuzhiyun 		    true, false, true),
3361*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3362*4882a593Smuzhiyun 		    false, false, true),
3363*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3364*4882a593Smuzhiyun 		    false, false, true),
3365*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3366*4882a593Smuzhiyun 		    false, false, true),
3367*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3368*4882a593Smuzhiyun 		    false, false, true),
3369*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3370*4882a593Smuzhiyun 		    false, false, true),
3371*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3372*4882a593Smuzhiyun 		    false, false, true),
3373*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3374*4882a593Smuzhiyun 		    false, false, true),
3375*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3376*4882a593Smuzhiyun 		    false, false, true),
3377*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3378*4882a593Smuzhiyun 		    false, false, true),
3379*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3380*4882a593Smuzhiyun 		    false, false, true),
3381*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3382*4882a593Smuzhiyun 		    true, false, true),
3383*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3384*4882a593Smuzhiyun 		    false, false, true),
3385*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3386*4882a593Smuzhiyun 		    false, false, true),
3387*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3388*4882a593Smuzhiyun 		    false, false, true),
3389*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3390*4882a593Smuzhiyun 		    false, false, true),
3391*4882a593Smuzhiyun 
3392*4882a593Smuzhiyun 	/* SM commands */
3393*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3394*4882a593Smuzhiyun 		    false, false, true),
3395*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3396*4882a593Smuzhiyun 		    false, false, true),
3397*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3398*4882a593Smuzhiyun 		    false, false, true),
3399*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3400*4882a593Smuzhiyun 		    false, false, true),
3401*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3402*4882a593Smuzhiyun 		    false, false, true),
3403*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3404*4882a593Smuzhiyun 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3405*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3406*4882a593Smuzhiyun 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3407*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3408*4882a593Smuzhiyun 		    true, false, true),
3409*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3410*4882a593Smuzhiyun 		    true, false, true),
3411*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3412*4882a593Smuzhiyun 		    true, false, true),
3413*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3414*4882a593Smuzhiyun 		    true, false, true),
3415*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3416*4882a593Smuzhiyun 		    true, false, true),
3417*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3418*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3419*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3420*4882a593Smuzhiyun 		    true, false, true),
3421*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3422*4882a593Smuzhiyun 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3423*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3424*4882a593Smuzhiyun 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3425*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3426*4882a593Smuzhiyun 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3427*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3428*4882a593Smuzhiyun 		    true, false, true),
3429*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3430*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3431*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3432*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3433*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3434*4882a593Smuzhiyun 		    true, false, true),
3435*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3436*4882a593Smuzhiyun 		    true, false, true),
3437*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3438*4882a593Smuzhiyun 		    true, false, true),
3439*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3440*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3441*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3442*4882a593Smuzhiyun 		    true, false, true),
3443*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3444*4882a593Smuzhiyun 		    true, false, true),
3445*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3446*4882a593Smuzhiyun 		    true, false, true),
3447*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3448*4882a593Smuzhiyun 		    true, false, true),
3449*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3450*4882a593Smuzhiyun 		    true, false, true),
3451*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3452*4882a593Smuzhiyun 		    true, false, true),
3453*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3454*4882a593Smuzhiyun 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3455*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3456*4882a593Smuzhiyun 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3457*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3458*4882a593Smuzhiyun 		    true, false, true),
3459*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3460*4882a593Smuzhiyun 		    true, false, true),
3461*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3462*4882a593Smuzhiyun 		    &vmw_cmd_dx_check_subresource, true, false, true),
3463*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3464*4882a593Smuzhiyun 		    &vmw_cmd_dx_check_subresource, true, false, true),
3465*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3466*4882a593Smuzhiyun 		    &vmw_cmd_dx_check_subresource, true, false, true),
3467*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3468*4882a593Smuzhiyun 		    &vmw_cmd_dx_view_define, true, false, true),
3469*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3470*4882a593Smuzhiyun 		    &vmw_cmd_dx_view_remove, true, false, true),
3471*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3472*4882a593Smuzhiyun 		    &vmw_cmd_dx_view_define, true, false, true),
3473*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3474*4882a593Smuzhiyun 		    &vmw_cmd_dx_view_remove, true, false, true),
3475*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3476*4882a593Smuzhiyun 		    &vmw_cmd_dx_view_define, true, false, true),
3477*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3478*4882a593Smuzhiyun 		    &vmw_cmd_dx_view_remove, true, false, true),
3479*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3480*4882a593Smuzhiyun 		    &vmw_cmd_dx_so_define, true, false, true),
3481*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3482*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3483*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3484*4882a593Smuzhiyun 		    &vmw_cmd_dx_so_define, true, false, true),
3485*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3486*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3487*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3488*4882a593Smuzhiyun 		    &vmw_cmd_dx_so_define, true, false, true),
3489*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3490*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3491*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3492*4882a593Smuzhiyun 		    &vmw_cmd_dx_so_define, true, false, true),
3493*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3494*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3495*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3496*4882a593Smuzhiyun 		    &vmw_cmd_dx_so_define, true, false, true),
3497*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3498*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3499*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3500*4882a593Smuzhiyun 		    &vmw_cmd_dx_define_shader, true, false, true),
3501*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3502*4882a593Smuzhiyun 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3503*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3504*4882a593Smuzhiyun 		    &vmw_cmd_dx_bind_shader, true, false, true),
3505*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3506*4882a593Smuzhiyun 		    &vmw_cmd_dx_so_define, true, false, true),
3507*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3508*4882a593Smuzhiyun 		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3509*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3510*4882a593Smuzhiyun 		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3511*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3512*4882a593Smuzhiyun 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3513*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3514*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3515*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3516*4882a593Smuzhiyun 		    &vmw_cmd_dx_cid_check, true, false, true),
3517*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3518*4882a593Smuzhiyun 		    &vmw_cmd_buffer_copy_check, true, false, true),
3519*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3520*4882a593Smuzhiyun 		    &vmw_cmd_pred_copy_check, true, false, true),
3521*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3522*4882a593Smuzhiyun 		    &vmw_cmd_dx_transfer_from_buffer,
3523*4882a593Smuzhiyun 		    true, false, true),
3524*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3525*4882a593Smuzhiyun 		    true, false, true),
3526*4882a593Smuzhiyun 
3527*4882a593Smuzhiyun 	/*
3528*4882a593Smuzhiyun 	 * SM5 commands
3529*4882a593Smuzhiyun 	 */
3530*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3531*4882a593Smuzhiyun 		    true, false, true),
3532*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3533*4882a593Smuzhiyun 		    true, false, true),
3534*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3535*4882a593Smuzhiyun 		    true, false, true),
3536*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3537*4882a593Smuzhiyun 		    &vmw_cmd_clear_uav_float, true, false, true),
3538*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3539*4882a593Smuzhiyun 		    false, true),
3540*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3541*4882a593Smuzhiyun 		    true),
3542*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3543*4882a593Smuzhiyun 		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3544*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3545*4882a593Smuzhiyun 		    &vmw_cmd_instanced_indirect, true, false, true),
3546*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3547*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3548*4882a593Smuzhiyun 		    &vmw_cmd_dispatch_indirect, true, false, true),
3549*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3550*4882a593Smuzhiyun 		    false, true),
3551*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3552*4882a593Smuzhiyun 		    &vmw_cmd_sm5_view_define, true, false, true),
3553*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3554*4882a593Smuzhiyun 		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3555*4882a593Smuzhiyun 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3556*4882a593Smuzhiyun 		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3557*4882a593Smuzhiyun };
3558*4882a593Smuzhiyun 
3559*4882a593Smuzhiyun bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3560*4882a593Smuzhiyun {
3561*4882a593Smuzhiyun 	u32 cmd_id = ((u32 *) buf)[0];
3562*4882a593Smuzhiyun 
3563*4882a593Smuzhiyun 	if (cmd_id >= SVGA_CMD_MAX) {
3564*4882a593Smuzhiyun 		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3565*4882a593Smuzhiyun 		const struct vmw_cmd_entry *entry;
3566*4882a593Smuzhiyun 
3567*4882a593Smuzhiyun 		*size = header->size + sizeof(SVGA3dCmdHeader);
3568*4882a593Smuzhiyun 		cmd_id = header->id;
3569*4882a593Smuzhiyun 		if (cmd_id >= SVGA_3D_CMD_MAX)
3570*4882a593Smuzhiyun 			return false;
3571*4882a593Smuzhiyun 
3572*4882a593Smuzhiyun 		cmd_id -= SVGA_3D_CMD_BASE;
3573*4882a593Smuzhiyun 		entry = &vmw_cmd_entries[cmd_id];
3574*4882a593Smuzhiyun 		*cmd = entry->cmd_name;
3575*4882a593Smuzhiyun 		return true;
3576*4882a593Smuzhiyun 	}
3577*4882a593Smuzhiyun 
3578*4882a593Smuzhiyun 	switch (cmd_id) {
3579*4882a593Smuzhiyun 	case SVGA_CMD_UPDATE:
3580*4882a593Smuzhiyun 		*cmd = "SVGA_CMD_UPDATE";
3581*4882a593Smuzhiyun 		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3582*4882a593Smuzhiyun 		break;
3583*4882a593Smuzhiyun 	case SVGA_CMD_DEFINE_GMRFB:
3584*4882a593Smuzhiyun 		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3585*4882a593Smuzhiyun 		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3586*4882a593Smuzhiyun 		break;
3587*4882a593Smuzhiyun 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3588*4882a593Smuzhiyun 		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3589*4882a593Smuzhiyun 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3590*4882a593Smuzhiyun 		break;
3591*4882a593Smuzhiyun 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3592*4882a593Smuzhiyun 		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3593*4882a593Smuzhiyun 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3594*4882a593Smuzhiyun 		break;
3595*4882a593Smuzhiyun 	default:
3596*4882a593Smuzhiyun 		*cmd = "UNKNOWN";
3597*4882a593Smuzhiyun 		*size = 0;
3598*4882a593Smuzhiyun 		return false;
3599*4882a593Smuzhiyun 	}
3600*4882a593Smuzhiyun 
3601*4882a593Smuzhiyun 	return true;
3602*4882a593Smuzhiyun }
3603*4882a593Smuzhiyun 
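/*
 * Editor's note: an illustrative sketch (not part of the original file)
 * showing how a debugging aid might use vmw_cmd_describe() above to walk a
 * command batch and print command names. The batch layout mirrors what the
 * checker below expects; the function itself is hypothetical.
 */
static void __maybe_unused vmw_example_print_batch(const void *batch,
						   u32 batch_size)
{
	const u8 *p = batch;
	u32 remaining = batch_size;

	while (remaining >= sizeof(u32)) {
		u32 cmd_size;
		const char *name;

		if (!vmw_cmd_describe(p, &cmd_size, &name) ||
		    cmd_size == 0 || cmd_size > remaining)
			break;

		pr_info("vmwgfx cmd: %s (%u bytes)\n", name, cmd_size);
		p += cmd_size;
		remaining -= cmd_size;
	}
}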
3604*4882a593Smuzhiyun static int vmw_cmd_check(struct vmw_private *dev_priv,
3605*4882a593Smuzhiyun 			 struct vmw_sw_context *sw_context, void *buf,
3606*4882a593Smuzhiyun 			 uint32_t *size)
3607*4882a593Smuzhiyun {
3608*4882a593Smuzhiyun 	uint32_t cmd_id;
3609*4882a593Smuzhiyun 	uint32_t size_remaining = *size;
3610*4882a593Smuzhiyun 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3611*4882a593Smuzhiyun 	int ret;
3612*4882a593Smuzhiyun 	const struct vmw_cmd_entry *entry;
3613*4882a593Smuzhiyun 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3614*4882a593Smuzhiyun 
3615*4882a593Smuzhiyun 	cmd_id = ((uint32_t *)buf)[0];
3616*4882a593Smuzhiyun 	/* Handle any non-3D commands */
3617*4882a593Smuzhiyun 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3618*4882a593Smuzhiyun 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3619*4882a593Smuzhiyun 
3621*4882a593Smuzhiyun 	cmd_id = header->id;
3622*4882a593Smuzhiyun 	*size = header->size + sizeof(SVGA3dCmdHeader);
3623*4882a593Smuzhiyun 
3624*4882a593Smuzhiyun 	cmd_id -= SVGA_3D_CMD_BASE;
3625*4882a593Smuzhiyun 	if (unlikely(*size > size_remaining))
3626*4882a593Smuzhiyun 		goto out_invalid;
3627*4882a593Smuzhiyun 
3628*4882a593Smuzhiyun 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3629*4882a593Smuzhiyun 		goto out_invalid;
3630*4882a593Smuzhiyun 
3631*4882a593Smuzhiyun 	entry = &vmw_cmd_entries[cmd_id];
3632*4882a593Smuzhiyun 	if (unlikely(!entry->func))
3633*4882a593Smuzhiyun 		goto out_invalid;
3634*4882a593Smuzhiyun 
3635*4882a593Smuzhiyun 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3636*4882a593Smuzhiyun 		goto out_privileged;
3637*4882a593Smuzhiyun 
3638*4882a593Smuzhiyun 	if (unlikely(entry->gb_disable && gb))
3639*4882a593Smuzhiyun 		goto out_old;
3640*4882a593Smuzhiyun 
3641*4882a593Smuzhiyun 	if (unlikely(entry->gb_enable && !gb))
3642*4882a593Smuzhiyun 		goto out_new;
3643*4882a593Smuzhiyun 
3644*4882a593Smuzhiyun 	ret = entry->func(dev_priv, sw_context, header);
3645*4882a593Smuzhiyun 	if (unlikely(ret != 0)) {
3646*4882a593Smuzhiyun 		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3647*4882a593Smuzhiyun 			       cmd_id + SVGA_3D_CMD_BASE, ret);
3648*4882a593Smuzhiyun 		return ret;
3649*4882a593Smuzhiyun 	}
3650*4882a593Smuzhiyun 
3651*4882a593Smuzhiyun 	return 0;
3652*4882a593Smuzhiyun out_invalid:
3653*4882a593Smuzhiyun 	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3654*4882a593Smuzhiyun 		       cmd_id + SVGA_3D_CMD_BASE);
3655*4882a593Smuzhiyun 	return -EINVAL;
3656*4882a593Smuzhiyun out_privileged:
3657*4882a593Smuzhiyun 	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3658*4882a593Smuzhiyun 		       cmd_id + SVGA_3D_CMD_BASE);
3659*4882a593Smuzhiyun 	return -EPERM;
3660*4882a593Smuzhiyun out_old:
3661*4882a593Smuzhiyun 	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3662*4882a593Smuzhiyun 		       cmd_id + SVGA_3D_CMD_BASE);
3663*4882a593Smuzhiyun 	return -EINVAL;
3664*4882a593Smuzhiyun out_new:
3665*4882a593Smuzhiyun 	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3666*4882a593Smuzhiyun 		       cmd_id + SVGA_3D_CMD_BASE);
3667*4882a593Smuzhiyun 	return -EINVAL;
3668*4882a593Smuzhiyun }
3669*4882a593Smuzhiyun 
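/*
 * Editor's note (not part of the original file): each 3D command is framed
 * as an SVGA3dCmdHeader { id, size } followed by size bytes of body, so
 * vmw_cmd_check() above consumes header->size + sizeof(SVGA3dCmdHeader)
 * bytes per command. A hypothetical zero-body SVGA_3D_CMD_NOP would thus
 * encode as the two words { SVGA_3D_CMD_NOP, 0 } and consume 8 bytes.
 */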
3670*4882a593Smuzhiyun static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3671*4882a593Smuzhiyun 			     struct vmw_sw_context *sw_context, void *buf,
3672*4882a593Smuzhiyun 			     uint32_t size)
3673*4882a593Smuzhiyun {
3674*4882a593Smuzhiyun 	int32_t cur_size = size;
3675*4882a593Smuzhiyun 	int ret;
3676*4882a593Smuzhiyun 
3677*4882a593Smuzhiyun 	sw_context->buf_start = buf;
3678*4882a593Smuzhiyun 
3679*4882a593Smuzhiyun 	while (cur_size > 0) {
3680*4882a593Smuzhiyun 		size = cur_size;
3681*4882a593Smuzhiyun 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3682*4882a593Smuzhiyun 		if (unlikely(ret != 0))
3683*4882a593Smuzhiyun 			return ret;
3684*4882a593Smuzhiyun 		buf = (void *)((unsigned long) buf + size);
3685*4882a593Smuzhiyun 		cur_size -= size;
3686*4882a593Smuzhiyun 	}
3687*4882a593Smuzhiyun 
3688*4882a593Smuzhiyun 	if (unlikely(cur_size != 0)) {
3689*4882a593Smuzhiyun 		VMW_DEBUG_USER("Command verifier out of sync.\n");
3690*4882a593Smuzhiyun 		return -EINVAL;
3691*4882a593Smuzhiyun 	}
3692*4882a593Smuzhiyun 
3693*4882a593Smuzhiyun 	return 0;
3694*4882a593Smuzhiyun }
3695*4882a593Smuzhiyun 
3696*4882a593Smuzhiyun static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3697*4882a593Smuzhiyun {
3698*4882a593Smuzhiyun 	/* Memory is validation context memory, so no need to free it */
3699*4882a593Smuzhiyun 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3700*4882a593Smuzhiyun }
3701*4882a593Smuzhiyun 
3702*4882a593Smuzhiyun static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3703*4882a593Smuzhiyun {
3704*4882a593Smuzhiyun 	struct vmw_relocation *reloc;
3705*4882a593Smuzhiyun 	struct ttm_buffer_object *bo;
3706*4882a593Smuzhiyun 
3707*4882a593Smuzhiyun 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3708*4882a593Smuzhiyun 		bo = &reloc->vbo->base;
3709*4882a593Smuzhiyun 		switch (bo->mem.mem_type) {
3710*4882a593Smuzhiyun 		case TTM_PL_VRAM:
3711*4882a593Smuzhiyun 			reloc->location->offset += bo->mem.start << PAGE_SHIFT;
3712*4882a593Smuzhiyun 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3713*4882a593Smuzhiyun 			break;
3714*4882a593Smuzhiyun 		case VMW_PL_GMR:
3715*4882a593Smuzhiyun 			reloc->location->gmrId = bo->mem.start;
3716*4882a593Smuzhiyun 			break;
3717*4882a593Smuzhiyun 		case VMW_PL_MOB:
3718*4882a593Smuzhiyun 			*reloc->mob_loc = bo->mem.start;
3719*4882a593Smuzhiyun 			break;
3720*4882a593Smuzhiyun 		default:
3721*4882a593Smuzhiyun 			BUG();
3722*4882a593Smuzhiyun 		}
3723*4882a593Smuzhiyun 	}
3724*4882a593Smuzhiyun 	vmw_free_relocations(sw_context);
3725*4882a593Smuzhiyun }
3726*4882a593Smuzhiyun 
3727*4882a593Smuzhiyun static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3728*4882a593Smuzhiyun 				 uint32_t size)
3729*4882a593Smuzhiyun {
3730*4882a593Smuzhiyun 	if (likely(sw_context->cmd_bounce_size >= size))
3731*4882a593Smuzhiyun 		return 0;
3732*4882a593Smuzhiyun 
3733*4882a593Smuzhiyun 	if (sw_context->cmd_bounce_size == 0)
3734*4882a593Smuzhiyun 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3735*4882a593Smuzhiyun 
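	/*
	 * Editor's note: grow geometrically (roughly 1.5x per step, rounded up
	 * to a whole page) until the bounce buffer covers the requested size.
	 */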
3736*4882a593Smuzhiyun 	while (sw_context->cmd_bounce_size < size) {
3737*4882a593Smuzhiyun 		sw_context->cmd_bounce_size =
3738*4882a593Smuzhiyun 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3739*4882a593Smuzhiyun 				   (sw_context->cmd_bounce_size >> 1));
3740*4882a593Smuzhiyun 	}
3741*4882a593Smuzhiyun 
3742*4882a593Smuzhiyun 	vfree(sw_context->cmd_bounce);
3743*4882a593Smuzhiyun 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3744*4882a593Smuzhiyun 
3745*4882a593Smuzhiyun 	if (sw_context->cmd_bounce == NULL) {
3746*4882a593Smuzhiyun 		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3747*4882a593Smuzhiyun 		sw_context->cmd_bounce_size = 0;
3748*4882a593Smuzhiyun 		return -ENOMEM;
3749*4882a593Smuzhiyun 	}
3750*4882a593Smuzhiyun 
3751*4882a593Smuzhiyun 	return 0;
3752*4882a593Smuzhiyun }
3753*4882a593Smuzhiyun 
3754*4882a593Smuzhiyun /**
3755*4882a593Smuzhiyun  * vmw_execbuf_fence_commands - create and submit a command stream fence
3756*4882a593Smuzhiyun  *
3757*4882a593Smuzhiyun  * Creates a fence object and submits a command stream marker.
3758*4882a593Smuzhiyun  * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
3759*4882a593Smuzhiyun  * It is then safe to fence buffers with a NULL pointer.
3760*4882a593Smuzhiyun  *
3761*4882a593Smuzhiyun  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3762*4882a593Smuzhiyun  * user-space handle is created; otherwise no handle is created.
3763*4882a593Smuzhiyun  */
3764*4882a593Smuzhiyun 
3765*4882a593Smuzhiyun int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3766*4882a593Smuzhiyun 			       struct vmw_private *dev_priv,
3767*4882a593Smuzhiyun 			       struct vmw_fence_obj **p_fence,
3768*4882a593Smuzhiyun 			       uint32_t *p_handle)
3769*4882a593Smuzhiyun {
3770*4882a593Smuzhiyun 	uint32_t sequence;
3771*4882a593Smuzhiyun 	int ret;
3772*4882a593Smuzhiyun 	bool synced = false;
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun 	/* p_handle implies file_priv. */
3775*4882a593Smuzhiyun 	BUG_ON(p_handle != NULL && file_priv == NULL);
3776*4882a593Smuzhiyun 
3777*4882a593Smuzhiyun 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3778*4882a593Smuzhiyun 	if (unlikely(ret != 0)) {
3779*4882a593Smuzhiyun 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3780*4882a593Smuzhiyun 		synced = true;
3781*4882a593Smuzhiyun 	}
3782*4882a593Smuzhiyun 
3783*4882a593Smuzhiyun 	if (p_handle != NULL)
3784*4882a593Smuzhiyun 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3785*4882a593Smuzhiyun 					    sequence, p_fence, p_handle);
3786*4882a593Smuzhiyun 	else
3787*4882a593Smuzhiyun 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3788*4882a593Smuzhiyun 
3789*4882a593Smuzhiyun 	if (unlikely(ret != 0 && !synced)) {
3790*4882a593Smuzhiyun 		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3791*4882a593Smuzhiyun 					 false, VMW_FENCE_WAIT_TIMEOUT);
3792*4882a593Smuzhiyun 		*p_fence = NULL;
3793*4882a593Smuzhiyun 	}
3794*4882a593Smuzhiyun 
3795*4882a593Smuzhiyun 	return ret;
3796*4882a593Smuzhiyun }
3797*4882a593Smuzhiyun 
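/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * the calling convention documented above. Error handling is elided and the
 * surrounding command submission is assumed to have already happened.
 */
static void __maybe_unused vmw_example_fence_after_submit(struct drm_file *file_priv,
							  struct vmw_private *dev_priv)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle;
	int ret;

	/* Passing a non-NULL p_handle also creates a user-space handle. */
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, &handle);
	if (ret != 0)
		VMW_DEBUG_USER("Fencing failed; buffers may be fenced with NULL.\n");

	/* ... fence buffers here, then drop the kernel reference ... */
	if (fence)
		vmw_fence_obj_unreference(&fence);
}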
3798*4882a593Smuzhiyun /**
3799*4882a593Smuzhiyun  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3800*4882a593Smuzhiyun  *
3801*4882a593Smuzhiyun  * @dev_priv: Pointer to a vmw_private struct.
3802*4882a593Smuzhiyun  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3803*4882a593Smuzhiyun  * @ret: Return value from fence object creation.
3804*4882a593Smuzhiyun  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3805*4882a593Smuzhiyun  * the information should be copied.
3806*4882a593Smuzhiyun  * @fence: Pointer to the fence object.
3807*4882a593Smuzhiyun  * @fence_handle: User-space fence handle.
3808*4882a593Smuzhiyun  * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3810*4882a593Smuzhiyun  *
3811*4882a593Smuzhiyun  * This function copies fence information to user-space. If copying fails, the
3812*4882a593Smuzhiyun  * user-space struct drm_vmw_fence_rep::error member is hopefully left
3813*4882a593Smuzhiyun  * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3814*4882a593Smuzhiyun  * will hopefully be detected.
3815*4882a593Smuzhiyun  *
3816*4882a593Smuzhiyun  * Also if copying fails, user-space will be unable to signal the fence object
3817*4882a593Smuzhiyun  * so we wait for it immediately, and then unreference the user-space reference.
3818*4882a593Smuzhiyun  */
3819*4882a593Smuzhiyun int
3820*4882a593Smuzhiyun vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3821*4882a593Smuzhiyun 			    struct vmw_fpriv *vmw_fp, int ret,
3822*4882a593Smuzhiyun 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3823*4882a593Smuzhiyun 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3824*4882a593Smuzhiyun 			    int32_t out_fence_fd)
3825*4882a593Smuzhiyun {
3826*4882a593Smuzhiyun 	struct drm_vmw_fence_rep fence_rep;
3827*4882a593Smuzhiyun 
3828*4882a593Smuzhiyun 	if (user_fence_rep == NULL)
3829*4882a593Smuzhiyun 		return 0;
3830*4882a593Smuzhiyun 
3831*4882a593Smuzhiyun 	memset(&fence_rep, 0, sizeof(fence_rep));
3832*4882a593Smuzhiyun 
3833*4882a593Smuzhiyun 	fence_rep.error = ret;
3834*4882a593Smuzhiyun 	fence_rep.fd = out_fence_fd;
3835*4882a593Smuzhiyun 	if (ret == 0) {
3836*4882a593Smuzhiyun 		BUG_ON(fence == NULL);
3837*4882a593Smuzhiyun 
3838*4882a593Smuzhiyun 		fence_rep.handle = fence_handle;
3839*4882a593Smuzhiyun 		fence_rep.seqno = fence->base.seqno;
3840*4882a593Smuzhiyun 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3841*4882a593Smuzhiyun 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3842*4882a593Smuzhiyun 	}
3843*4882a593Smuzhiyun 
3844*4882a593Smuzhiyun 	/*
3845*4882a593Smuzhiyun 	 * copy_to_user errors will be detected by user space not seeing
3846*4882a593Smuzhiyun 	 * fence_rep::error filled in. Typically user-space would have pre-set
3847*4882a593Smuzhiyun 	 * that member to -EFAULT.
3848*4882a593Smuzhiyun 	 */
3849*4882a593Smuzhiyun 	ret = copy_to_user(user_fence_rep, &fence_rep,
3850*4882a593Smuzhiyun 			   sizeof(fence_rep));
3851*4882a593Smuzhiyun 
3852*4882a593Smuzhiyun 	/*
3853*4882a593Smuzhiyun 	 * User-space lost the fence object. We need to sync and unreference the
3854*4882a593Smuzhiyun 	 * handle.
3855*4882a593Smuzhiyun 	 */
3856*4882a593Smuzhiyun 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3857*4882a593Smuzhiyun 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3858*4882a593Smuzhiyun 					  TTM_REF_USAGE);
3859*4882a593Smuzhiyun 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3860*4882a593Smuzhiyun 		(void) vmw_fence_obj_wait(fence, false, false,
3861*4882a593Smuzhiyun 					  VMW_FENCE_WAIT_TIMEOUT);
3862*4882a593Smuzhiyun 	}
3863*4882a593Smuzhiyun 
3864*4882a593Smuzhiyun 	return ret ? -EFAULT : 0;
3865*4882a593Smuzhiyun }
3866*4882a593Smuzhiyun 
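/*
 * Editor's note: an illustrative user-space sketch (not part of the original
 * file) of the convention documented above: pre-seed fence_rep.error with
 * -EFAULT so a failed copy_to_user() in the kernel is detectable. The
 * submit_execbuf() wrapper around the execbuf ioctl is hypothetical.
 */
#if 0 /* user-space pseudo-code, never compiled */
	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };

	if (submit_execbuf(fd, commands, size, &fence_rep) == 0 &&
	    fence_rep.error == -EFAULT) {
		/* The kernel failed to write back fence info; resynchronize. */
	}
#endif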
3867*4882a593Smuzhiyun /**
3868*4882a593Smuzhiyun  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3869*4882a593Smuzhiyun  *
3870*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private structure.
3871*4882a593Smuzhiyun  * @kernel_commands: Pointer to the unpatched command batch.
3872*4882a593Smuzhiyun  * @command_size: Size of the unpatched command batch.
3873*4882a593Smuzhiyun  * @sw_context: Structure holding the relocation lists.
3874*4882a593Smuzhiyun  *
3875*4882a593Smuzhiyun  * Side effects: If this function returns 0, then the command batch pointed to
3876*4882a593Smuzhiyun  * by @kernel_commands will have been modified.
3877*4882a593Smuzhiyun  */
3878*4882a593Smuzhiyun static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3879*4882a593Smuzhiyun 				   void *kernel_commands, u32 command_size,
3880*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context)
3881*4882a593Smuzhiyun {
3882*4882a593Smuzhiyun 	void *cmd;
3883*4882a593Smuzhiyun 
3884*4882a593Smuzhiyun 	if (sw_context->dx_ctx_node)
3885*4882a593Smuzhiyun 		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3886*4882a593Smuzhiyun 					  sw_context->dx_ctx_node->ctx->id);
3887*4882a593Smuzhiyun 	else
3888*4882a593Smuzhiyun 		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3889*4882a593Smuzhiyun 
3890*4882a593Smuzhiyun 	if (!cmd)
3891*4882a593Smuzhiyun 		return -ENOMEM;
3892*4882a593Smuzhiyun 
3893*4882a593Smuzhiyun 	vmw_apply_relocations(sw_context);
3894*4882a593Smuzhiyun 	memcpy(cmd, kernel_commands, command_size);
3895*4882a593Smuzhiyun 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3896*4882a593Smuzhiyun 	vmw_resource_relocations_free(&sw_context->res_relocations);
3897*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, command_size);
3898*4882a593Smuzhiyun 
3899*4882a593Smuzhiyun 	return 0;
3900*4882a593Smuzhiyun }
3901*4882a593Smuzhiyun 
3902*4882a593Smuzhiyun /**
3903*4882a593Smuzhiyun  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3904*4882a593Smuzhiyun  * command buffer manager.
3905*4882a593Smuzhiyun  *
3906*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private structure.
3907*4882a593Smuzhiyun  * @header: Opaque handle to the command buffer allocation.
3908*4882a593Smuzhiyun  * @command_size: Size of the unpatched command batch.
3909*4882a593Smuzhiyun  * @sw_context: Structure holding the relocation lists.
3910*4882a593Smuzhiyun  *
3911*4882a593Smuzhiyun  * Side effects: If this function returns 0, then the command buffer represented
3912*4882a593Smuzhiyun  * by @header will have been modified.
3913*4882a593Smuzhiyun  */
3914*4882a593Smuzhiyun static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3915*4882a593Smuzhiyun 				     struct vmw_cmdbuf_header *header,
3916*4882a593Smuzhiyun 				     u32 command_size,
3917*4882a593Smuzhiyun 				     struct vmw_sw_context *sw_context)
3918*4882a593Smuzhiyun {
3919*4882a593Smuzhiyun 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3920*4882a593Smuzhiyun 		  SVGA3D_INVALID_ID);
3921*4882a593Smuzhiyun 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3922*4882a593Smuzhiyun 				       header);
3923*4882a593Smuzhiyun 
3924*4882a593Smuzhiyun 	vmw_apply_relocations(sw_context);
3925*4882a593Smuzhiyun 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3926*4882a593Smuzhiyun 	vmw_resource_relocations_free(&sw_context->res_relocations);
3927*4882a593Smuzhiyun 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3928*4882a593Smuzhiyun 
3929*4882a593Smuzhiyun 	return 0;
3930*4882a593Smuzhiyun }
3931*4882a593Smuzhiyun 
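/*
 * Editor's note (not part of the original file): both submission paths above
 * apply buffer-object and resource relocations before committing. The fifo
 * path must memcpy() the patched batch into reserved fifo space, whereas the
 * command-buffer path patches in place: vmw_execbuf_cmdbuf() below has
 * already copied the user batch into the command buffer at allocation time.
 */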
3932*4882a593Smuzhiyun /**
3933*4882a593Smuzhiyun  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3934*4882a593Smuzhiyun  * submission using a command buffer.
3935*4882a593Smuzhiyun  *
3936*4882a593Smuzhiyun  * @dev_priv: Pointer to a device private structure.
3937*4882a593Smuzhiyun  * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to the unpatched command batch, or NULL.
3938*4882a593Smuzhiyun  * @command_size: Size of the unpatched command batch.
3939*4882a593Smuzhiyun  * @header: Out parameter returning the opaque pointer to the command buffer.
3940*4882a593Smuzhiyun  *
3941*4882a593Smuzhiyun  * This function checks whether we can use the command buffer manager for
3942*4882a593Smuzhiyun  * submission and if so, creates a command buffer of suitable size and copies
3943*4882a593Smuzhiyun  * the user data into that buffer.
3944*4882a593Smuzhiyun  *
3945*4882a593Smuzhiyun  * On successful return, the function returns a pointer to the data in the
3946*4882a593Smuzhiyun  * command buffer and *@header is set to non-NULL.
3947*4882a593Smuzhiyun  *
3948*4882a593Smuzhiyun  * If command buffers cannot be used, the function returns the value
3949*4882a593Smuzhiyun  * @kernel_commands had on entry; that value may be NULL. In that case,
3950*4882a593Smuzhiyun  * *@header is set to NULL.
3951*4882a593Smuzhiyun  *
3952*4882a593Smuzhiyun  * If an error is encountered, the function will return a pointer error value.
3953*4882a593Smuzhiyun  * If the function is interrupted by a signal while sleeping, it will return
3954*4882a593Smuzhiyun  * -ERESTARTSYS cast to a pointer error value.
3955*4882a593Smuzhiyun  */
3956*4882a593Smuzhiyun static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3957*4882a593Smuzhiyun 				void __user *user_commands,
3958*4882a593Smuzhiyun 				void *kernel_commands, u32 command_size,
3959*4882a593Smuzhiyun 				struct vmw_cmdbuf_header **header)
3960*4882a593Smuzhiyun {
3961*4882a593Smuzhiyun 	size_t cmdbuf_size;
3962*4882a593Smuzhiyun 	int ret;
3963*4882a593Smuzhiyun 
3964*4882a593Smuzhiyun 	*header = NULL;
3965*4882a593Smuzhiyun 	if (command_size > SVGA_CB_MAX_SIZE) {
3966*4882a593Smuzhiyun 		VMW_DEBUG_USER("Command buffer is too large.\n");
3967*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
3968*4882a593Smuzhiyun 	}
3969*4882a593Smuzhiyun 
3970*4882a593Smuzhiyun 	if (!dev_priv->cman || kernel_commands)
3971*4882a593Smuzhiyun 		return kernel_commands;
3972*4882a593Smuzhiyun 
3973*4882a593Smuzhiyun 	/* If possible, add a little space for fencing. */
3974*4882a593Smuzhiyun 	cmdbuf_size = command_size + 512;
3975*4882a593Smuzhiyun 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3976*4882a593Smuzhiyun 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3977*4882a593Smuzhiyun 					   header);
3978*4882a593Smuzhiyun 	if (IS_ERR(kernel_commands))
3979*4882a593Smuzhiyun 		return kernel_commands;
3980*4882a593Smuzhiyun 
3981*4882a593Smuzhiyun 	ret = copy_from_user(kernel_commands, user_commands, command_size);
3982*4882a593Smuzhiyun 	if (ret) {
3983*4882a593Smuzhiyun 		VMW_DEBUG_USER("Failed copying commands.\n");
3984*4882a593Smuzhiyun 		vmw_cmdbuf_header_free(*header);
3985*4882a593Smuzhiyun 		*header = NULL;
3986*4882a593Smuzhiyun 		return ERR_PTR(-EFAULT);
3987*4882a593Smuzhiyun 	}
3988*4882a593Smuzhiyun 
3989*4882a593Smuzhiyun 	return kernel_commands;
3990*4882a593Smuzhiyun }
3991*4882a593Smuzhiyun 
3992*4882a593Smuzhiyun static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3993*4882a593Smuzhiyun 				   struct vmw_sw_context *sw_context,
3994*4882a593Smuzhiyun 				   uint32_t handle)
3995*4882a593Smuzhiyun {
3996*4882a593Smuzhiyun 	struct vmw_resource *res;
3997*4882a593Smuzhiyun 	int ret;
3998*4882a593Smuzhiyun 	unsigned int size;
3999*4882a593Smuzhiyun 
4000*4882a593Smuzhiyun 	if (handle == SVGA3D_INVALID_ID)
4001*4882a593Smuzhiyun 		return 0;
4002*4882a593Smuzhiyun 
4003*4882a593Smuzhiyun 	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4004*4882a593Smuzhiyun 	ret = vmw_validation_preload_res(sw_context->ctx, size);
4005*4882a593Smuzhiyun 	if (ret)
4006*4882a593Smuzhiyun 		return ret;
4007*4882a593Smuzhiyun 
4008*4882a593Smuzhiyun 	res = vmw_user_resource_noref_lookup_handle
4009*4882a593Smuzhiyun 		(dev_priv, sw_context->fp->tfile, handle,
4010*4882a593Smuzhiyun 		 user_context_converter);
4011*4882a593Smuzhiyun 	if (IS_ERR(res)) {
4012*4882a593Smuzhiyun 		VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
4013*4882a593Smuzhiyun 			       (unsigned int) handle);
4014*4882a593Smuzhiyun 		return PTR_ERR(res);
4015*4882a593Smuzhiyun 	}
4016*4882a593Smuzhiyun 
4017*4882a593Smuzhiyun 	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
4018*4882a593Smuzhiyun 	if (unlikely(ret != 0))
4019*4882a593Smuzhiyun 		return ret;
4020*4882a593Smuzhiyun 
4021*4882a593Smuzhiyun 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4022*4882a593Smuzhiyun 	sw_context->man = vmw_context_res_man(res);
4023*4882a593Smuzhiyun 
4024*4882a593Smuzhiyun 	return 0;
4025*4882a593Smuzhiyun }
4026*4882a593Smuzhiyun 
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;

		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

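/*
 * Example (illustrative sketch): the fence-fd export in the function above
 * follows the usual three-step sync_file pattern: reserve an fd, wrap the
 * dma_fence in a file, then publish the fd. Only mainline helpers from
 * <linux/sync_file.h> and <linux/file.h> are assumed; the error handling
 * here is condensed relative to vmw_execbuf_process():
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	sync_file = sync_file_create(&fence->base);
 *	if (!sync_file) {
 *		put_unused_fd(fd);		(fd was never published)
 *		return -ENOMEM;
 *	}
 *	fd_install(fd, sync_file->file);	(fd table owns the file now)
 *
 * Once fd_install() has run, closing the fd drops the file reference, so
 * neither fput() nor put_unused_fd() may be called on it afterwards; that
 * is why the function above only calls fput()/put_unused_fd() when the
 * final usercopy fails before fd_install().
 */
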
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but it makes attempts to
 * do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

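/*
 * Example (illustrative sketch): the locking rule in the kernel-doc above,
 * that @dev_priv->cmdbuf_mutex must be held, could be made machine-checked
 * with the standard lockdep helper. The assertion below is not present in
 * the function as written; it is shown only to make the contract concrete:
 *
 *	lockdep_assert_held(&dev_priv->cmdbuf_mutex);
 *
 * The wrapper vmw_execbuf_release_pinned_bo() below shows the intended
 * call pattern: take the mutex, call the __ variant, drop the mutex.
 */
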
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but it makes attempts to
 * do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM has correctly copied it. */
		break;
	}

	/* If we imported a fence FD from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}
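
/*
 * Example (illustrative user-space sketch, not part of the driver): a
 * minimal submission through this ioctl. The field names come from
 * struct drm_vmw_execbuf_arg as dereferenced above; DRM_IOCTL_VMW_EXECBUF,
 * drmIoctl() and the cmd_buf/cmd_len variables are assumed from the uapi
 * header and libdrm respectively, and are hypothetical here:
 *
 *	struct drm_vmw_fence_rep fence_rep = {};
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands       = (uintptr_t)cmd_buf,
 *		.command_size   = cmd_len,
 *		.throttle_us    = 0,
 *		.version        = DRM_VMW_EXECBUF_VERSION,
 *		.flags          = 0,
 *		.context_handle = SVGA3D_INVALID_ID,	(no DX context)
 *		.fence_rep      = (uintptr_t)&fence_rep,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg))
 *		perror("DRM_IOCTL_VMW_EXECBUF");
 *
 * Passing SVGA3D_INVALID_ID as the context handle makes
 * vmw_execbuf_tie_context() a no-op, as seen earlier in this file.
 */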